query: stringlengths 9–3.4k
document: stringlengths 9–87.4k
metadata: dict
negatives: listlengths 4–101
negative_scores: listlengths 4–101
document_score: stringlengths 3–10
document_rank: stringclasses, 102 values
Evaluate function for embedding network.
def evaluate(embed_net, trials_loader, mode="norm2", eval_L=300, num_eval=6,
             device=None, batch_size=30, embed_norm=True, trials_feat=None,
             load_wav=None):
    def _prefeat(embed_net, trials_loader, L, num_eval, batch_size, embed_norm,
                 device, load_wav=None):
        if load_wav is None:
            def load_trial_wav(path):
                return loadWAV(path, L=L, evalmode=True, num_eval=num_eval)
        else:
            load_trial_wav = load_wav
        wav_file = [[trials_loader.dataset[i]["enroll"], trials_loader.dataset[i]["test"]]
                    for i in range(len(trials_loader.dataset))]
        wav_file = sorted(list(set(np.concatenate(wav_file).tolist())))
        trials = waveform(wav_file, load_trial_wav)

        def collate_fn(batch):
            """collect from a batch of VoxWave Dataset."""
            file = [item["file"] for item in batch]
            wave = torch.cat([item["wave"] for item in batch], dim=0)
            return {"file": file, "wave": wave}

        trialoader = DataLoader(trials, batch_size=batch_size, num_workers=5,
                                collate_fn=collate_fn, shuffle=False)
        trials_feat = {}
        with torch.no_grad():
            embed_net.to(device)
            embed_net.eval()
            for data in tqdm(trialoader, total=len(trialoader)):
                file = data["file"]
                wave = data["wave"].to(device)
                feat = embed_net(wave)
                if embed_norm is True:
                    feat = F.normalize(feat, p=2, dim=1).detach().cpu()
                for i, j in enumerate(range(0, feat.shape[0], num_eval)):
                    trials_feat[file[i]] = feat[j: j + num_eval].clone()
        return trials_feat

    if trials_feat is None:
        trials_feat = _prefeat(embed_net, trials_loader, eval_L, num_eval,
                               batch_size, embed_norm, device, load_wav)
    all_scores = []
    all_labels = []
    all_trials = []
    for trial in tqdm(trials_loader, total=len(trials_loader)):
        label = trial["label"]
        enroll = trial["enroll"]
        test = trial["test"]
        for i in range(len(label)):
            enroll_embed = trials_feat[enroll[i]]
            test_embed = trials_feat[test[i]]
            score = calculate_score(enroll_embed.to(device), test_embed.to(device),
                                    mode=mode, num_eval=num_eval)
            all_scores.append(score)
            all_trials.append([enroll[i], test[i]])
        all_labels.extend(label.numpy().tolist())
    all_scores = np.array(all_scores)
    all_labels = np.array(all_labels)
    eer, thresh = calculate_eer(all_labels, all_scores, 1)
    return eer, thresh, all_scores, all_labels, all_trials, trials_feat
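For orientation only, a minimal hypothetical driver for the evaluate routine above; it is not part of the dataset row. The SpeakerEmbedder stub and the trial_dataset name are assumptions for illustration, and the helpers referenced inside evaluate (loadWAV, waveform, calculate_score, calculate_eer) are expected to exist in the surrounding codebase.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader

class SpeakerEmbedder(nn.Module):
    # Toy stand-in for a real speaker-embedding network (assumption, for illustration only).
    def __init__(self, in_dim=48000, embed_dim=192):
        super().__init__()
        self.proj = nn.Linear(in_dim, embed_dim)

    def forward(self, wave):
        # wave: (N, in_dim) waveform segments -> (N, embed_dim) embeddings
        return self.proj(wave)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
embed_net = SpeakerEmbedder()
# trial_dataset (assumed) yields items with "enroll"/"test" wav paths and a "label" tensor.
trials_loader = DataLoader(trial_dataset, batch_size=32, shuffle=False)

eer, thresh, scores, labels, trials, feats = evaluate(
    embed_net, trials_loader, mode="norm2", eval_L=300, num_eval=6,
    device=device, batch_size=30, embed_norm=True)
print(f"EER = {eer:.4f} at threshold {thresh:.4f}")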
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
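The objective block marks this row for triplet-style contrastive training: the query embedding is pulled toward the document embedding and pushed away from the negatives. As a hedged illustration of how such a triplet might be consumed downstream (an assumption about usage, not something stated in the row), an InfoNCE-style loss over one triplet could look like:

import torch
import torch.nn.functional as F

def triplet_infonce_loss(query_emb, doc_emb, neg_embs, temperature=0.05):
    # query_emb: (d,), doc_emb: (d,), neg_embs: (n_neg, d); all assumed L2-normalized.
    candidates = torch.cat([doc_emb.unsqueeze(0), neg_embs], dim=0)  # (1 + n_neg, d)
    logits = candidates @ query_emb / temperature                    # (1 + n_neg,)
    target = torch.tensor(0)  # the positive document sits at index 0
    return F.cross_entropy(logits.unsqueeze(0), target.unsqueeze(0))

# Toy usage with random, normalized embeddings:
d, n_neg = 192, 8
q = F.normalize(torch.randn(d), dim=0)
pos = F.normalize(torch.randn(d), dim=0)
negs = F.normalize(torch.randn(n_neg, d), dim=1)
loss = triplet_infonce_loss(q, pos, negs)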
[ "def evaluate(self, prediction_fn):\n pass", "def evaluate():\n model.eval()\n with torch.no_grad():\n loss, n = 0, 0\n for xb, yb in valid_dl:\n n += len(xb)\n loss += loss_func(model(xb), yb) * len(xb)\n\n return loss/n", "def evaluate(model, g, val_nid, device):\n model.eval()\n nfeat = g.ndata['features']\n labels = g.ndata['labels']\n with th.no_grad():\n pred = model.module.inference(g, nfeat, device, args.batch_size, args.num_workers)\n model.train()\n test_acc = Accuracy()\n return test_acc(th.softmax(pred[val_nid], -1), labels[val_nid].to(pred.device))", "def evaluate(embed_model, model, pt, dataset, batch_size):\n\n embed_model.eval()\n model.eval()\n lossf = nn.NLLLoss(size_average=False)\n\n correct = 0\n total = 0\n total_loss = 0\n print('Start Evaluating!')\n print('Validation Size: {}'.format(len(dataset)))\n\n threshold = 0.3\n\n data_iter = iter(pt.batch_iter(dataset, batch_size))\n\n for i in range(len(dataset) // batch_size):\n\n # catch the data\n p1_idx, p2_idx, _, _, label = next(data_iter)\n\n p1_idx = Variable(p1_idx)\n p2_idx = Variable(p2_idx)\n label = Variable(label)\n\n if use_cuda:\n p1_idx = p1_idx.cuda()\n p2_idx = p2_idx.cuda()\n label = label.cuda()\n\n # Feed to the network\n p1_emb, p2_emb = embed_model(p1_idx, p2_idx)\n out = model(p1_emb, p2_emb)\n\n # print(label)\n # print(out)\n\n loss = lossf(out, label)\n total_loss += loss.data[0]\n\n prob = torch.exp(out)\n predicted = Variable(torch.LongTensor([1 if l[1].data[0] >= threshold else 0 for l in prob]))\n\n # _, predicted = torch.max(out, dim=1)\n total += p1_idx.size()[0]\n\n # print(predicted)\n\n correct += torch.sum((label == predicted), dim=0).data[0]\n\n print('Correct Labels: {}/{}'.format(correct, (i + 1) * batch_size))\n\n print('Valid Loss: {}, Acc: {}'.format(total_loss / float(total),\n correct / float(total)))", "def val_func(self, data, label):\r\n self.net.eval()\r\n\r\n with torch.no_grad():\r\n outputs, losses = self.forward(data, label)\r\n\r\n return outputs, losses", "def eval(self):\n\n # parameters initialize\n torch = import_optional_dependency(\"torch\")\n eval_total = 0\n eval_correct = 0\n eval_loss = 0\n self._set_eval()\n\n # display the information\n if self.info:\n print(f\"\\rEvaluating...\", end=\"\")\n\n # start eval part\n for i, (source, target) in enumerate(self.eval_dataset):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n result = self.model(source)\n eval_loss += self.criterion(result, target).item()\n _, predicted = torch.max(result.data, 1)\n eval_total += target.size(0)\n eval_correct += (predicted == target).sum().item()\n\n accuracy = eval_correct / eval_total\n eval_loss = eval_loss / eval_total\n\n if self.info:\n print(f\"\\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }\")\n\n return eval_loss, accuracy", "def evaluate():\n sess = tf.Session()\n tf.logging.info(\"Building graph...\")\n\n embeddings = load_embeddings()\n tf_data = load_batched_dataset(False, embeddings)\n it = tf_data.make_initializable_iterator()\n features, labels = it.get_next()\n\n logits = predict(False, embeddings, features[\"premise\"],\n features[\"hypothesis\"])\n accuracy, update_ops = tf.metrics.accuracy(\n tf.argmax(logits, 1, output_type=tf.int32), tf.to_int32(labels))\n\n tf.logging.info(\"Running initializers...\")\n checkpoint_file = FLAGS.checkpoint_file\n if checkpoint_file is not None:\n saver = tf.train.Saver(tf.trainable_variables())\n tf.logging.info(\"Restoring from checkpoint: \" + checkpoint_file)\n 
saver.restore(sess, checkpoint_file)\n else:\n tf.logging.warning(\"No checkpoint given, evaling model with random weights\")\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n sess.run(tf.tables_initializer())\n sess.run(it.initializer)\n\n tf.logging.info(\"Starting loop....\")\n while True:\n try:\n sess.run(update_ops)\n except tf.errors.OutOfRangeError:\n break\n tf.logging.info(\"Done\")\n\n accuracy = sess.run(accuracy)\n print(\"Accuracy: %f\" % accuracy)", "def _evaluate(self, X, Y):\n # evaluate all networks\n #\n # evaluations = torch.zeros(self.population_size, device=self.device)\n evaluations = torch.zeros(self.population_size, device=self.device)\n\n for i in range(self.population_size):\n selected_pheno = self.population[i].cpu()\n # if IDCT is to be used first transform the vector, then use it to assemble the network\n if self.IDCT_from is not None:\n selected_pheno = torch.tensor(\n fftpack.idct(np.array(selected_pheno), n=self.model.total_parameters(), norm=\"ortho\"))\n fill_weights(self.model, selected_pheno.to(self.device))\n # evaluate\n predicted = self.model.forward(X)\n evaluations[i] = self.loss_function(predicted, Y)\n return evaluations", "def evaluate(self):\n weight, bias, emb = self.sess.run([self.sm_w_t, self.sm_b, self.emb])\n return utils.pp(weight, bias, emb, self.test_data)", "def evaluate(network, loss_function, softmax_function, test_loader, test_set_size):\n running_loss = 0.0\n confusion_matrix = { # Of shape [predicted value][real value]\n 0: {0: 0, 1: 0, 2: 0},\n 1: {0: 0, 1: 0, 2: 0},\n 2: {0: 0, 1: 0, 2: 0},\n }\n batch_size = -1\n network.eval()\n with torch.no_grad():\n correct = 0\n for graph_batch, label_batch in test_loader:\n if batch_size == -1:\n batch_size = label_batch.size(0)\n logits = network(graph_batch, graph_batch.ndata['n_feat'], graph_batch.edata['e_feat'], 0, 0)\n running_loss += loss_function(logits, label_batch).detach().item()\n predicted_classes = torch.argmax(logits, dim=1).detach()\n correct += (predicted_classes == label_batch).sum().item()\n for predicted_class, label in zip(predicted_classes, label_batch):\n confusion_matrix[predicted_class.item()][label.item()] += 1\n\n if batch_size <= 0:\n print(\"Error : batch size is {}\".format(batch_size))\n exit(1)\n\n return correct / test_set_size, running_loss / len(test_loader), confusion_matrix", "def evaluate(self, batch_x, batch_y):\n raise NotImplementedError()", "def evaluate(self, input):\n\t\treturn self.function(np.dot(self.weights, np.array([-1] + list(input))))", "def evaluate(network: nn.Module, data: DataLoader, metric: callable) -> list:\n error_list = []\n with torch.no_grad():\n for x, y in data:\n \n prediction = network.forward(x)\n error_list.append(metric(prediction, y))\n \n return torch.tensor(error_list)", "def evaluate(self) -> Dict[str, float]:\n eval_dataloader = self.get_eval_dataloader()\n\n output = self._prediction_loop(eval_dataloader, description=\"Evaluation\")\n return output.metrics", "def eval_function(engine: Engine, batch: Tuple[torch.Tensor, torch.Tensor, List[int]]) -> \\\n Tuple[torch.Tensor, torch.Tensor]:\n model.eval()\n with torch.no_grad():\n x, y, lengths = batch\n x, y = x.to(model.device), y.to(model.device)\n y_pred: torch.Tensor = model(x, lengths)\n return y_pred, y", "def evaluate(self, inp):\n x = torch.unsqueeze(inp, 0)\n out = self.forward(x)\n return out.max(1)[1].item()", "def evaluate(self, corpus, data_source, args, criterion, iwae=False, num_importance_samples=None):\n # 
Turn on evaluation mode which disables dropout.\n self.eval()\n total_loss = 0\n ntokens = len(corpus)\n hidden = self.init_hidden(args.batch_size)\n for batch in data_source:\n data, targets = batch.text, batch.target\n output, hidden = self.forward(data, hidden)\n output_flat = output.view(-1, ntokens)\n total_loss += len(data) * args.batch_size * criterion(output_flat, targets.view(-1)).data\n hidden = repackage_hidden(hidden)\n return total_loss[0] / len(data_source.dataset[0].text)", "def evaluate(self):\n pass", "def evaluate(self):\n pass", "def evaluate(self, input):\n\t\tinput = np.array([-1] + list(input))\n\n\t\tfor layer in self.layers:\n\t\t\tinput = np.array([-1]+[ neuron.function(np.dot(neuron.weights, input)) for neuron in layer ])\n\n\t\treturn input[1:]", "def evaluate(forward_fn, val_loader, device, opt):\n inf_len = opt.nt_cond\n assert val_loader is not None and opt.n_iter_test <= len(val_loader)\n\n n = 0 # Total number of evaluation videos, updated in the validation loop\n global_psnr = 0 # Sum of all computed prediction PSNR\n with torch.no_grad():\n for j, batch in enumerate(val_loader):\n # Stop when the given number of iterations is reached\n if j >= opt.n_iter_test:\n break\n\n # Data\n x = batch.to(device)\n x_inf = x[:inf_len]\n nt = x.shape[0]\n n_b = x.shape[1]\n n += n_b\n\n # Perform a given number of predictions per video\n all_x = []\n for _ in range(opt.n_samples_test):\n all_x.append(forward_fn(x_inf, nt, dt=1 / opt.n_euler_steps)[0].cpu())\n all_x = torch.stack(all_x)\n\n # Sort predictions with respect to PSNR and select the closest one to the ground truth\n x_cpu = x.cpu()\n all_mse = torch.mean(F.mse_loss(all_x, x_cpu.expand_as(all_x), reduction='none'), dim=[4, 5])\n all_psnr = torch.mean(10 * torch.log10(1 / all_mse), dim=[1, 3])\n _, idx_best = all_psnr.max(0)\n x_ = all_x[idx_best, :, torch.arange(n_b).to(device)].transpose(0, 1).contiguous().to(device)\n\n # Compute the final PSNR score\n mse = torch.mean(F.mse_loss(x_, x, reduction='none'), dim=[3, 4])\n psnr = 10 * torch.log10(1 / mse)\n global_psnr += psnr[inf_len:].mean().item() * n_b\n\n # Average by batch\n return -global_psnr / n", "def evaluate(self, batch: RecordBatch) -> Any:\n processed_args = self._process_arguments(self._arguments, batch)\n if self._function:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n if self._is_binary_func:\n result = self._apply_binary_args_function(\n *processed_args\n )\n else:\n result = self._function(*processed_args)\n else:\n result = self._expr_kernel(processed_args, batch)\n\n return self._post_process_result(result)", "def evaluate(self, eval_data, eval_labels, eval_input_fn):\n raise NotImplementedError(\"Method must be implemented by subclass\")", "def evaluate():\n\t\t\t\tif not hasattr(evaluate, 'value'):\n\t\t\t\t\tevaluate.value = func()\n\t\t\t\treturn evaluate.value", "def evaluate(model,loss_fn, val_dataloader):\r\n # Put the model into the evaluation mode. 
The dropout layers are disabled during\r\n # the test time.\r\n model.eval()\r\n\r\n # Tracking variables\r\n val_accuracy = []\r\n val_loss = []\r\n\r\n # For each batch in our validation set...\r\n for batch in val_dataloader:\r\n # Load batch to GPU\r\n b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch)\r\n\r\n # Compute logits\r\n with torch.no_grad():\r\n logits = model(b_input_ids, b_attn_mask)\r\n\r\n # Compute loss\r\n loss = loss_fn(logits, b_labels)\r\n val_loss.append(loss.item())\r\n\r\n # Get the predictions\r\n preds = torch.argmax(logits, dim=1).flatten()\r\n\r\n # Calculate the accuracy rate\r\n accuracy = (preds == b_labels).cpu().numpy().mean() * 100\r\n val_accuracy.append(accuracy)\r\n\r\n # Compute the average accuracy and loss over the validation set.\r\n val_loss = np.mean(val_loss)\r\n val_accuracy = np.mean(val_accuracy)\r\n\r\n return val_loss, val_accuracy", "def evaluate(self):\n raise NotImplementedError()", "def evaluate(self) :\n pass", "def __call__(self, in_tensor):\n return self.evaluate(in_tensor)", "def evaluate(self, representativeness: float, weight: float) -> float:\n pass", "def evaluate():\n\n # seed provides the mechanism to control the shuffling which takes place reading input\n seed = tf.placeholder(tf.int64, shape=())\n \n # Generate placeholders for the images and labels.\n iterator = input_data.input_pipeline_binary(FLAGS.data_dir,\n FLAGS.batch_size,\n fake_data=FLAGS.fake_data,\n num_epochs=1,\n read_threads=FLAGS.read_threads,\n shuffle_size=FLAGS.shuffle_size,\n num_expected_examples=FLAGS.num_examples,\n seed=seed)\n image_path, label_path, images, labels = iterator.get_next()\n\n if FLAGS.verbose:\n print_op = tf.print(\"images and labels this batch: \", \n image_path, label_path, labels)\n else:\n print_op = tf.constant('No printing')\n\n if FLAGS.random_rotation:\n images, labels = harmonics.apply_random_rotation(images, labels)\n\n # Build a Graph that computes predictions from the inference model.\n logits = topology.inference(images, FLAGS.network_pattern)\n \n # Add to the Graph the Ops for loss calculation.\n loss = topology.binary_loss(logits, labels)\n \n # Set up some prediction statistics\n predicted = tf.round(tf.nn.sigmoid(logits))\n correct_pred = tf.equal(predicted, labels)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n \n while True:\n eval_once(sess, iterator, saver, seed, labels, loss, accuracy, predicted)\n if FLAGS.run_once:\n break\n time.sleep(FLAGS.eval_interval_secs)", "def _evaluate(self, w, x, y, z):\n raise NotImplementedError()", "def evaluate(self) -> int:", "def evaluate(self, dataset):\n return self.model.evaluate(dataset.X_val, dataset.y_val)", "def _evaluate(tensor):\n if context.executing_eagerly():\n return tensor.numpy()\n return ops.get_default_session().run(tensor)", "def _evaluate(model):\n _recompile(model)\n if isinstance(eval_dataset, tuple):\n eval_images, eval_labels = eval_dataset\n return model.evaluate(\n eval_images, eval_labels, verbose=verbose, return_dict=True)\n else:\n return model.evaluate(eval_dataset, verbose=verbose, return_dict=True)", "def evaluate(weights: fl.common.Weights) -> Optional[Tuple[float, float]]:\n model = models.load_model(glb.MODEL)\n model.set_weights(weights)\n model.to(DEVICE)\n testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)\n # using pytorch for central evaluation, can be tensorflow as well\n return modules.pt_test(model, 
testloader, device=DEVICE)", "def evaluate(self, in_tensor):\n # Reset module list. In case the structure changes dynamically,\n # the module list is tracked for every forward pass.\n self.inverter.reset_module_list()\n self.prediction = self.model(in_tensor)\n return self.prediction", "def compute(self, *args, **kwargs):\n for node in self.evaluation_sequence:\n node.evaluate()", "def eval(self):\n raise NotImplementedError", "def evaluate():\n with tf.Graph().as_default() as g:\n \n # Get hazy and clean images for SYNTHIA.\n val = FLAGS.val\n hazy_images, clean_images_ground_truth, _ = model_spec.input(val)\n\n # Build a Graph that computes the dehazed predictions from the\n # inference model.\n clean_images_predicted = model_spec.inference(hazy_images)\n\n # Calculate loss (only the data term).\n loss = model_spec.data_loss(clean_images_predicted, clean_images_ground_truth)\n\n # Restore the moving average version of the learned variables for eval.\n variable_averages = tf.train.ExponentialMovingAverage(\n model_spec.MOVING_AVERAGE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)\n\n while True:\n eval_once(saver, summary_writer, loss, summary_op)\n if FLAGS.run_once:\n print('Finished one-off evaluation.')\n break\n time.sleep(FLAGS.eval_interval_secs)", "def evaluate(inputs, labels):\n # Your code here.\n _, probs = forward(inputs)\n preds = predict(probs)\n trues = np.argmax(labels, axis=1)\n return np.mean(preds == trues)", "def EvaluateFunction(self, p_float=..., p_float=..., p_float=...):\n ...", "def run(self, x):\n \"*** YOUR CODE HERE question 1 ***\"\n score = nn.DotProduct(self.w, x)\n\n return score", "def evaluate(self, data: dataset.Dataset, batch_size: int = 32) -> Any:\n ds = data.gen_tf_dataset(\n batch_size, is_training=False, preprocess=self._preprocess)\n return self._model.evaluate(ds)", "def Evaluate(self, input_data: np.ndarray) -> np.ndarray:\n if input_data.shape[0] != self.input_layer_size:\n raise IndexError(f\"Input data length is {input_data.shape[0]}, must match length of input layer size {self.input_layer_size}\")\n\n # Evaulate hidden layer given input values\n hidden_layer_values = np.zeros(self.hidden_layer_size, dtype=np.float32)\n for hidden_node_index in range(self.hidden_layer_size):\n node_value = 0\n for input_node_index in range(self.input_layer_size):\n node_value += input_data[input_node_index] * self.input_to_hidden_weights[input_node_index, hidden_node_index]\n hidden_layer_values[hidden_node_index] = sigmoid(node_value + self.hidden_layer_biases[hidden_node_index])\n\n # Evaulate output layer given hidden layer values\n output_layer_values = np.zeros(self.output_layer_size, dtype=np.float32)\n for output_node_index in range(self.output_layer_size):\n node_value = 0\n for hidden_node_index in range(self.hidden_layer_size):\n node_value += hidden_layer_values[hidden_node_index] * self.hidden_to_output_weights[hidden_node_index, output_node_index]\n output_layer_values[output_node_index] = sigmoid(node_value + self.output_layer_biases[output_node_index])\n\n return output_layer_values", "def evaluate(problem, batch_size, inference_fn, optimizer, model_dir=None):\n logging.info(\n 'Evaluating.\\nOperative config:\\n%s' % gin.operative_config_str())\n if model_dir is None:\n model_dir = 
default_model_dir()\n if model_dir is not None:\n model_dir = os.path.expanduser(model_dir)\n if not os.path.isdir(model_dir):\n raise RuntimeError('model_dir does not exist: %s' % model_dir)\n\n val_ds = problem.get_dataset('validation', batch_size)\n inputs = tf.nest.map_structure(\n lambda shape, dtype: tf.keras.layers.Input(\n shape=shape[1:], dtype=dtype),\n val_ds.output_shapes[0], val_ds.output_types[0])\n outputs = inference_fn(inputs, problem.output_spec())\n model = _model(inputs, outputs)\n model.compile(\n optimizer=optimizer,\n loss=problem.loss,\n metrics=problem.metrics)\n\n cb.restore_model(model, model_dir)\n validation_steps = batch_steps(\n problem.examples_per_epoch('validation'), batch_size)\n \n val_ds = val_ds.map(lambda x, y: (tuple(tf.nest.flatten(x)), y))\n\n logging.info('Running evaluation for %d steps' % validation_steps)\n return model.evaluate(val_ds, steps=validation_steps)", "def evaluate(self, w, X, y):\n pass # this is because it's a base class, it will be implemented below.", "def val(self, val_loader):\n\n self.model.eval()\n with torch.no_grad():\n return self._iteration(val_loader)", "def evaluate(self):\n raise NotImplementedError(\"Abstract method\")", "def evaluate(self, edict):\n pass", "def eval(self):\n self.mode = \"eval\"\n self.online_net.eval()", "def eval(self):\n self.mode = \"eval\"\n self.online_net.eval()", "def eval(self, x):\n\n if self.eval_f is None:\n self.eval_f = theano.function(\n inputs=[self.input],\n outputs=[self.y],\n givens=[(self.m, self.bm), (self.v, self.bv)]\n )\n\n return self.eval_f(x.astype(dtype))", "def evaluate(self):\n\t\t\t\tif not hasattr(evaluate, 'value'):\n\t\t\t\t\tevaluate.value = func(self)\n\t\t\t\treturn evaluate.value", "def evaluate(self, dataset):\n\t\tpass", "def eval(self, xx, d=None):\n if self.weights is None:\n self.compute_weights()\n\n val = 0\n for i in range(self.M):\n val += self.weights[i]*self.model_list[i].eval(xx, d)\n return val", "def evaluate(val_loader, model, epoch, cfg):\n if distributed.is_master_proc():\n print('-' * 89)\n print('Evaluation on val set epoch {:5d}'.format(epoch))\n print('-' * 89)\n \n # Enable eval mode.\n model.eval()\n sigmoid = nn.Sigmoid()\n\n # Use cuda if available\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n instances_iou = []\n pbar = tqdm(total=len(val_loader))\n for (batch_idx, (caption, boxes, instances, mask_features, annotations, ann_types, ann_categories, noun_phrases, grounding_instances, image_info)) in enumerate(val_loader):\n boxes = boxes.to(device)\n instances = instances.to(device)\n mask_features = mask_features.to(device)\n annotations = annotations.to(device)\n ann_types = ann_types.to(device)\n ann_categories = ann_categories.to(device)\n noun_phrases = noun_phrases.to(device)\n grounding_instances = grounding_instances.to(device)\n height = image_info['height'].to(device)\n width = image_info['width'].to(device)\n \n # Perform the forward pass\n scores = model(caption, boxes, mask_features, noun_phrases)\n\n if cfg.NUM_GPUS > 1:\n scores, grounding_instances, annotations, instances = distributed.all_gather(\n [scores, grounding_instances, annotations, instances]\n )\n height, width = distributed.all_gather(\n [height, width]\n )\n\n # Evaluation\n words_mask = ann_types == 1\n\n scores = torch.bmm(noun_phrases.transpose(1, 2), scores)\n scores = scores / (noun_phrases.sum(dim=1).unsqueeze(dim=2).repeat(1, 1, scores.shape[2]) + 0.0000001)\n \n scores = sigmoid(scores)\n index = torch.argmax(scores, 
dim=2).cpu().numpy()\n predictions = instances[torch.arange(instances.shape[0]).unsqueeze(-1), index]\n predictions = predictions[words_mask]\n targets = grounding_instances[words_mask]\n words_index = words_mask.nonzero()\n\n if len(predictions.shape) < 3:\n predictions = predictions.unsqueeze(0)\n targets = targets.unsqueeze(0)\n words_index = words_index.unsqueeze(0)\n\n plurals_mask = ann_types == 2\n for p in plurals_mask.nonzero():\n plural_instance = torch.zeros([predictions.shape[1], predictions.shape[2]]).to(device)\n if not cfg.TEST.ORACLE:\n plural_instances = (scores[p[0], p[1], :] > 0.1).nonzero()\n plural_instances = plural_instances.squeeze() if len(plural_instances.shape) > 1 else plural_instances\n else:\n plural_instances = annotations[p[0], p[1]].nonzero().squeeze()\n if plural_instances.nelement() > 0:\n plural_instance = instances[p[0], plural_instances]\n if len(plural_instance.shape) == 3:\n plural_instance, _ = plural_instance.max(dim=0)\n predictions = torch.cat([predictions, plural_instance.unsqueeze(0)])\n targets = torch.cat([targets, grounding_instances[p[0], p[1]].unsqueeze(0)])\n words_index = torch.cat([words_index, p.unsqueeze(0)])\n\n for p, t, (i, _) in zip(predictions, targets, words_index):\n mask_transform = Resize((int(height[i].cpu().item()), int(width[i].cpu().item())))\n p = mask_transform(p.unsqueeze(dim=0)).squeeze()\n t = mask_transform(t.unsqueeze(dim=0)).squeeze()\n _, _, instance_iou = compute_mask_IoU(p, t)\n instances_iou.append(instance_iou.cpu().item())\n\n if distributed.is_master_proc():\n pbar.update(1)\n if batch_idx % cfg.LOG_PERIOD == 0:\n tqdm.write('acc@0.5: {:.5f} | AA: {:.5f}'.format(accuracy_score(np.ones([len(instances_iou)]), np.array(instances_iou) > 0.5), average_accuracy(instances_iou))) \n\n pbar.close()\n\n # Final evaluation metrics\n AA = average_accuracy(instances_iou)\n accuracy = accuracy_score(np.ones([len(instances_iou)]), np.array(instances_iou) > 0.5)\n if distributed.is_master_proc():\n print('| epoch {:5d} | final acc@0.5: {:.5f} | final AA: {:.5f} |'.format(\n epoch,\n accuracy,\n AA))\n return AA", "def evaluate_batch(self, batch, stage):\n predictions = self.compute_forward(batch, stage=stage)\n with torch.no_grad():\n loss = self.compute_objectives(predictions, batch, stage=stage)\n return loss.detach()", "def eval(self):\n params = self.params\n langs = ['en', params.target_lang]\n self.encoder.eval()\n self.proj.eval()\n\n scores = OrderedDict({'epoch': self.epoch})\n\n for splt in ['valid', 'test']:\n\n for lang in langs:\n if lang == 'en' and splt == 'test' or lang != 'en' and splt == 'valid':\n continue\n lang_id = params.lang2id[lang if lang != 'jp' else 'ja']\n valid = 0\n total = 0\n\n for batch in self.get_iterator(splt, lang):\n # batch\n (sent1, len1), idx = batch\n # set max length to 256, avoid position embedding overflow and save time.\n x, lengths = truncate(sent1, len1, 256, params.eos_index)\n lang_ids = x.clone().fill_(lang_id)\n\n y = self.data[lang][splt]['y'][idx]\n\n # cuda\n x, y, lengths, lang_ids = to_cuda(x, y, lengths, lang_ids)\n\n # forward\n output = self.proj(self.encoder.get_embeddings(x, lengths, langs=lang_ids))\n predictions = output.data.max(1)[1]\n\n # update statistics\n valid += predictions.eq(y).sum().item()\n total += len(len1)\n\n # compute accuracy\n acc = 100.0 * valid / total\n scores['CLF_%s_%s_acc' % (splt, lang)] = acc\n logger.info(\"CLF - %s - %s - Epoch %i - Acc: %.1f%%\" % (splt, lang, self.epoch, acc))\n\n logger.info(\"__log__:%s\" % 
json.dumps(scores))\n return scores", "def evaluate(\n self,\n duration_fn: Optional[Callable[[int], int]] = None,\n ) -> dict:\n # Call the `_before_evaluate` hook.\n self._before_evaluate()\n\n if self.evaluation_dataset is not None:\n return {\"evaluation\": self._run_offline_evaluation()}\n\n # Sync weights to the evaluation WorkerSet.\n if self.evaluation_workers is not None:\n self.evaluation_workers.sync_weights(\n from_worker_or_learner_group=self.workers.local_worker()\n )\n self._sync_filters_if_needed(\n central_worker=self.workers.local_worker(),\n workers=self.evaluation_workers,\n config=self.evaluation_config,\n )\n\n self.callbacks.on_evaluate_start(algorithm=self)\n\n if self.config.custom_evaluation_function:\n logger.info(\n \"Running custom eval function {}\".format(\n self.config.custom_evaluation_function\n )\n )\n metrics = self.config.custom_evaluation_function(\n self, self.evaluation_workers\n )\n if not metrics or not isinstance(metrics, dict):\n raise ValueError(\n \"Custom eval function must return \"\n \"dict of metrics, got {}.\".format(metrics)\n )\n else:\n if (\n self.evaluation_workers is None\n and self.workers.local_worker().input_reader is None\n ):\n raise ValueError(\n \"Cannot evaluate w/o an evaluation worker set in \"\n \"the Algorithm or w/o an env on the local worker!\\n\"\n \"Try one of the following:\\n1) Set \"\n \"`evaluation_interval` >= 0 to force creating a \"\n \"separate evaluation worker set.\\n2) Set \"\n \"`create_env_on_driver=True` to force the local \"\n \"(non-eval) worker to have an environment to \"\n \"evaluate on.\"\n )\n\n # How many episodes/timesteps do we need to run?\n # In \"auto\" mode (only for parallel eval + training): Run as long\n # as training lasts.\n unit = self.config.evaluation_duration_unit\n eval_cfg = self.evaluation_config\n rollout = eval_cfg.rollout_fragment_length\n num_envs = eval_cfg.num_envs_per_worker\n auto = self.config.evaluation_duration == \"auto\"\n duration = (\n self.config.evaluation_duration\n if not auto\n else (self.config.evaluation_num_workers or 1)\n * (1 if unit == \"episodes\" else rollout)\n )\n agent_steps_this_iter = 0\n env_steps_this_iter = 0\n\n # Default done-function returns True, whenever num episodes\n # have been completed.\n if duration_fn is None:\n\n def duration_fn(num_units_done):\n return duration - num_units_done\n\n logger.info(f\"Evaluating current state of {self} for {duration} {unit}.\")\n\n metrics = None\n all_batches = []\n # No evaluation worker set ->\n # Do evaluation using the local worker. 
Expect error due to the\n # local worker not having an env.\n if self.evaluation_workers is None:\n # If unit=episodes -> Run n times `sample()` (each sample\n # produces exactly 1 episode).\n # If unit=ts -> Run 1 `sample()` b/c the\n # `rollout_fragment_length` is exactly the desired ts.\n iters = duration if unit == \"episodes\" else 1\n for _ in range(iters):\n batch = self.workers.local_worker().sample()\n agent_steps_this_iter += batch.agent_steps()\n env_steps_this_iter += batch.env_steps()\n if self.reward_estimators:\n all_batches.append(batch)\n metrics = collect_metrics(\n self.workers,\n keep_custom_metrics=eval_cfg.keep_per_episode_custom_metrics,\n timeout_seconds=eval_cfg.metrics_episode_collection_timeout_s,\n )\n\n # Evaluation worker set only has local worker.\n elif self.evaluation_workers.num_remote_workers() == 0:\n # If unit=episodes -> Run n times `sample()` (each sample\n # produces exactly 1 episode).\n # If unit=ts -> Run 1 `sample()` b/c the\n # `rollout_fragment_length` is exactly the desired ts.\n iters = duration if unit == \"episodes\" else 1\n for _ in range(iters):\n batch = self.evaluation_workers.local_worker().sample()\n agent_steps_this_iter += batch.agent_steps()\n env_steps_this_iter += batch.env_steps()\n if self.reward_estimators:\n all_batches.append(batch)\n\n # Evaluation worker set has n remote workers.\n elif self.evaluation_workers.num_healthy_remote_workers() > 0:\n # How many episodes have we run (across all eval workers)?\n num_units_done = 0\n _round = 0\n # In case all of the remote evaluation workers die during a round\n # of evaluation, we need to stop.\n while True and self.evaluation_workers.num_healthy_remote_workers() > 0:\n units_left_to_do = duration_fn(num_units_done)\n if units_left_to_do <= 0:\n break\n\n _round += 1\n unit_per_remote_worker = (\n 1 if unit == \"episodes\" else rollout * num_envs\n )\n # Select proper number of evaluation workers for this round.\n selected_eval_worker_ids = [\n worker_id\n for i, worker_id in enumerate(\n self.evaluation_workers.healthy_worker_ids()\n )\n if i * unit_per_remote_worker < units_left_to_do\n ]\n batches = self.evaluation_workers.foreach_worker(\n func=lambda w: w.sample(),\n local_worker=False,\n remote_worker_ids=selected_eval_worker_ids,\n timeout_seconds=self.config.evaluation_sample_timeout_s,\n )\n if len(batches) != len(selected_eval_worker_ids):\n logger.warning(\n \"Calling `sample()` on your remote evaluation worker(s) \"\n \"resulted in a timeout (after the configured \"\n f\"{self.config.evaluation_sample_timeout_s} seconds)! 
\"\n \"Try to set `evaluation_sample_timeout_s` in your config\"\n \" to a larger value.\"\n + (\n \" If your episodes don't terminate easily, you may \"\n \"also want to set `evaluation_duration_unit` to \"\n \"'timesteps' (instead of 'episodes').\"\n if unit == \"episodes\"\n else \"\"\n )\n )\n break\n\n _agent_steps = sum(b.agent_steps() for b in batches)\n _env_steps = sum(b.env_steps() for b in batches)\n # 1 episode per returned batch.\n if unit == \"episodes\":\n num_units_done += len(batches)\n # Make sure all batches are exactly one episode.\n for ma_batch in batches:\n ma_batch = ma_batch.as_multi_agent()\n for batch in ma_batch.policy_batches.values():\n assert batch.is_terminated_or_truncated()\n # n timesteps per returned batch.\n else:\n num_units_done += (\n _agent_steps\n if self.config.count_steps_by == \"agent_steps\"\n else _env_steps\n )\n if self.reward_estimators:\n # TODO: (kourosh) This approach will cause an OOM issue when\n # the dataset gets huge (should be ok for now).\n all_batches.extend(batches)\n\n agent_steps_this_iter += _agent_steps\n env_steps_this_iter += _env_steps\n\n logger.info(\n f\"Ran round {_round} of non-parallel evaluation \"\n f\"({num_units_done}/{duration if not auto else '?'} \"\n f\"{unit} done)\"\n )\n else:\n # Can't find a good way to run this evaluation.\n # Wait for next iteration.\n pass\n\n if metrics is None:\n metrics = collect_metrics(\n self.evaluation_workers,\n keep_custom_metrics=self.config.keep_per_episode_custom_metrics,\n timeout_seconds=eval_cfg.metrics_episode_collection_timeout_s,\n )\n\n # TODO: Don't dump sampler results into top-level.\n if not self.config.custom_evaluation_function:\n metrics = dict({\"sampler_results\": metrics}, **metrics)\n\n metrics[NUM_AGENT_STEPS_SAMPLED_THIS_ITER] = agent_steps_this_iter\n metrics[NUM_ENV_STEPS_SAMPLED_THIS_ITER] = env_steps_this_iter\n # TODO: Remove this key at some point. 
Here for backward compatibility.\n metrics[\"timesteps_this_iter\"] = env_steps_this_iter\n\n # Compute off-policy estimates\n estimates = defaultdict(list)\n # for each batch run the estimator's fwd pass\n for name, estimator in self.reward_estimators.items():\n for batch in all_batches:\n estimate_result = estimator.estimate(\n batch,\n split_batch_by_episode=self.config.ope_split_batch_by_episode,\n )\n estimates[name].append(estimate_result)\n\n # collate estimates from all batches\n if estimates:\n metrics[\"off_policy_estimator\"] = {}\n for name, estimate_list in estimates.items():\n avg_estimate = tree.map_structure(\n lambda *x: np.mean(x, axis=0), *estimate_list\n )\n metrics[\"off_policy_estimator\"][name] = avg_estimate\n\n # Evaluation does not run for every step.\n # Save evaluation metrics on Algorithm, so it can be attached to\n # subsequent step results as latest evaluation result.\n self.evaluation_metrics = {\"evaluation\": metrics}\n\n # Trigger `on_evaluate_end` callback.\n self.callbacks.on_evaluate_end(\n algorithm=self, evaluation_metrics=self.evaluation_metrics\n )\n\n # Also return the results here for convenience.\n return self.evaluation_metrics", "def evaluate(net, data_loader):\n net.reset()\n target_scores = []\n non_target_scores = []\n for data in tqdm(data_loader):\n sample_input, output = data[0], data[1]\n sample_input = whiten(sample_input)\n mask, score = gate_activation(net, sample_input)\n selected_score = score[mask]\n if selected_score.size == 0:\n xo = 0.5\n else:\n xo = np.sum(selected_score) / selected_score.size\n if output == 1:\n target_scores.append(xo)\n else:\n non_target_scores.append(xo)\n\n target_scores = np.array(target_scores)\n non_target_scores = np.array(non_target_scores)\n\n pmiss, pfa = rocch(target_scores, non_target_scores)\n eer = rocch2eer(pmiss, pfa)\n\n return eer", "def do_eval(sess,model,valid,batch_size):\n valid_X,valid_y,valid_p=valid\n number_examples=valid_X.shape[0]\n if number_examples>10000:\n number_examples=validation_size\n print(\"do_eval.valid.number_examples:\",number_examples)\n if number_examples>validation_size: valid_X,valid_y,valid_p=valid_X[0:validation_size],valid_y[0:validation_size],valid_p[0:validation_size]\n eval_loss,eval_counter,eval_acc=0.0,0,0.0\n for start,end in zip(range(0,number_examples,batch_size),range(batch_size,number_examples,batch_size)):\n feed_dict = {model.x_mask_lm: valid_X[start:end],model.y_mask_lm: valid_y[start:end],model.p_mask_lm:valid_p[start:end],\n model.dropout_keep_prob: 1.0} # FLAGS.dropout_keep_prob\n curr_eval_loss, logits_lm, accuracy_lm= sess.run([model.loss_val_lm,model.logits_lm,model.accuracy_lm],feed_dict) # logits:[batch_size,label_size]\n eval_loss=eval_loss+curr_eval_loss\n eval_acc=eval_acc+accuracy_lm\n eval_counter=eval_counter+1\n return eval_loss/float(eval_counter+small_value), eval_acc/float(eval_counter+small_value)", "def evaluate(self, X):\n\n raise NotImplementedError(\"not implemented!\")", "def evaluate(data_loader, model, device):\n\n\tmodel.eval()\n\ttotal_num_examples = 0\n\ttotal_error = 0\n\tfor idx, batch in enumerate(data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t####Your code here ---\n\n\t\t# get the output from the model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# get error, num_examples using accuracy_fn defined previously\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# update 
total_error and total_num_examples\n\t\ttotal_error += error\n\t\ttotal_num_examples += num_examples\n\n\taccuracy = 1 - total_error / total_num_examples\n\treturn accuracy", "def evaluate(self):\n\n\t\tself.model_score = self.model.evaluate(self.x_test, self.y_test, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\t\treturn self.model_score", "def evaluate(self, g):\n pass", "def evaluate(self, network, verbose=False):\n # Convert to a network if it is not.\n\n if not isinstance(network, NeuralNetwork):\n network = NeuralNetwork(network)\n \n steps, states, _ = self._loop(network, max_steps=self.max_steps, verbose=verbose)\n \n if network.node_types[-1].__name__ != 'tanh':\n raise Exception(\"Network output must have range [-1, 1]\") \n \n if self.penalize_oscillation:\n \"\"\" This bit implements the fitness function as described in\n Stanley - Evolving Neural Networks through Augmenting Topologies\n \"\"\"\n f1 = steps/float(self.max_steps)\n if steps < 100:\n f2 = 0\n else:\n wiggle = sum(abs(x) + abs(dx) + abs(t[0]) + abs(dt[0]) for \n (x, dx, t, dt) in states[-100:])\n wiggle = max(wiggle, 0.01) # Cap the wiggle bonus\n f2 = 0.75 / wiggle\n score = 0.1 * f1 + 0.9 * f2\n else:\n \"\"\" This is just number of steps without falling.\n \"\"\"\n score = steps/float(self.max_steps)\n \n return {'fitness': score, 'steps': steps}", "def eval(self):\n # self.recognizer.eval()\n self.detector.eval()\n self.shared_conv.eval()", "def run_evaluation(net, loader):\n net.net.eval()\n losses_eval = {}\n for i, batch in enumerate(loader):\n with torch.no_grad():\n losses_batch = net.compute_loss(*batch, eval=True)\n append_losses(losses_eval, losses_batch)\n net.net.train()\n return losses_eval", "def evaluate(self, X):\n\n\t\tpass", "def evaluate(self, features, labels):\n raise NotImplementedError('Not implemented')", "def evaluate(self, x, y, batch_size=None, **kwargs):\n if not batch_size:\n batch_size = self.batch_size\n return self.model.evaluate(x, y, batch_size, **kwargs)", "def evaluate(self, x):\n if len(x.shape) == 1:\n x = x.reshape((1, -1))\n if x.shape[1] != self.dim_in-1:\n raise self.ANNException('input dimension not matching')\n nodes = self._forward(x)\n return nodes[:,-self.dim_out:]", "def evaluate(net, dev, batcher): \n def accuracy(outputs, labels):\n correct = 0\n total = 0\n misclassified = []\n for (i, output) in enumerate(outputs):\n total += 1\n if labels[i] == output.argmax():\n correct += 1 \n return correct, total, misclassified\n val_loader = batcher(dev, 128)\n total_val_loss = 0\n correct = 0\n total = 0\n misclassified = []\n loss = torch.nn.CrossEntropyLoss() \n for data in val_loader:\n inputs = data[:,1:]\n labels = torch.clamp(data[:,0], min=0).long()\n\n val_outputs = net(inputs) \n val_loss_size = loss(val_outputs, labels)\n\n correct_inc, total_inc, misclassified_inc = accuracy(val_outputs, \n labels)\n correct += correct_inc\n total += total_inc\n misclassified += misclassified_inc\n total_val_loss += val_loss_size.data.item()\n return correct/total, misclassified", "def evaluate(self):\n eval_list = nx.topological_sort(self.graph)\n for n in eval_list:\n n.evaluate()\n print(\"evaluating type\", type(n))\n\n # Notify observers of finished calculation\n self.notifyObservers(\"EVALUATION DONE\")\n return \"FINISHED\"", "def eval(self):\n return self.with_transforms(\"eval\")", "def evaluate(\n self, nodes, derivatives=np.array([0, 0, 0]), modes=None, unique=False\n ):", "def single_round_model_eval(train_fun, 
decode_fun, eval_fun, train_set,\n dev_set, metrics, metrics_weights):\n assert(len(metrics) > 0)\n assert(len(metrics) == len(metrics_weights))\n tf.reset_default_graph()\n try:\n train_fun(train_set, dev_set)\n \n tf.reset_default_graph()\n model_dir, decode_sig = decode_fun(dev_set, verbose=False)\n\n M = eval_fun(dev_set, model_dir, decode_sig, verbose=False)\n\n metrics_value = 0\n for m, m_w in zip(metrics, metrics_weights):\n metrics_value += m_w * M[m]\n except graph_utils.InfPerplexityError:\n metrics_value = -np.inf\n\n return metrics_value", "def evaluate(self, x):\n return self.function(x)", "def _run_evaluator(self, func, stats):\n host_stats = stats['host_stats']\n host_caps = stats['host_caps']\n extra_specs = stats['extra_specs']\n share_stats = stats['share_stats']\n\n result = evaluator.evaluate(\n func,\n extra=extra_specs,\n stats=host_stats,\n capabilities=host_caps,\n share=share_stats)\n\n return result", "def evaluate(data_loader, model, device):\n model.eval()\n\n loss_ = []\n with torch.no_grad():\n for idx, batch in enumerate(data_loader):\n data = batch.to(device)\n outputs = model.forward(data)\n loss_.append(F.l1_loss(outputs, data).data.numpy())\n\n return np.mean(loss_)", "def evaluate(path_to_config, path_to_model):\n\n config, paths, session_id = setup(path_to_config, 1)\n assert isinstance(config, ExperimentConfig)\n logger = logging.getLogger(\"%s.main\" % config.name)\n\n logger.info(\"Evaluating network on test data\")\n\n network = Network(config, paths, session_id, 0)\n network.build()\n network.evaluate(DATA_TYPE_TEST, model_path=path_to_model)", "def evaluate_dataset(self, net, criterion, data_loader):\n is_training = net.training # Remember if the network is training or not\n\n net.eval() # Set to eval mode\n\n running_loss = 0.\n counter = 0\n print('Make sure this is set correctly')\n decode_type = 'single_frame' # Always use a decode_type of 'single_frame'\n use_single_frame = decode_type == 'single_frame'\n decode_type = self.get_decode_type(decode_type)\n last_batch = None\n localized = False # For graph net eval\n last_predicted_node_name = None # For graph net eval\n last_predicted_behavior_id = None # For graph net eval\n if isinstance(net, PhaseRNN):\n output = torch.tensor([[0.]], dtype=torch.float32)\n with torch.no_grad():\n for i, batch in enumerate(data_loader):\n raw_batch = batch # Only used for GraphNet\n vel, depth = decode_batch(batch, decode_type, self.cfg)\n\n # Sanity check\n if not isinstance(net, GraphNet):\n # Check that batch size is 1\n batch_size = vel.size(0) # Compute batch size\n assert batch_size == 1\n\n # Check if this is the beginning of a new episode\n if isinstance(net, PhaseRNN):\n is_new_episode = (Evaluator.is_new_episode(last_batch, batch)\n or self.is_new_episode(output))\n elif isinstance(net, GraphNet):\n if not isinstance(batch, dict): # Graph info provided\n has_graph_info = True\n batch = vel[1]['batch']\n else:\n has_graph_info = False\n # While we have not localized the agent in this episode yet, keep the\n # is_new_episode flag to True. 
Note that localized could be set to False from a\n # previous iteration.\n is_new_episode = self.is_new_episode(last_batch, batch) or not localized\n else:\n is_new_episode = self.is_new_episode(last_batch, batch)\n\n if is_new_episode:\n localized = False # For graph net eval\n last_predicted_node_name = None # For graph net eval\n last_predicted_behavior_id = None # For graph net eval\n\n # Update\n last_batch = batch\n\n # If GraphNet, check if localized (AKA if graph info has been provided)\n if isinstance(net, GraphNet) and (has_graph_info is True):\n localized = True # For graph net eval\n\n if isinstance(net, GraphNet) and (localized is True):\n # Agent has been localized\n # Set up the input to the network (combine it all into the depth variable)\n if (is_new_episode is True) or True and (vel is not None):\n cur_area_graph = self.sem_graphs[batch['area_name'][0]]\n initial_graph_net_input, _ = vel\n depth = {\n 'depth': depth,\n 'graph_net_input': initial_graph_net_input,\n }\n else:\n # We are in the middle of a rollout for this episode. Provide the subgraph\n # based on the previous localization prediction.\n\n cur_graph_net_input = self.construct_graph_net_input(\n self.cfg,\n cur_area_graph,\n last_predicted_node_name,\n last_predicted_behavior_id,\n decode_type,\n batch,\n )\n\n depth = {\n 'depth': depth,\n 'graph_net_input': cur_graph_net_input,\n }\n\n # Start evaluating the sequence/episode once we have found the starting node/position\n if isinstance(net, GraphNet) and (localized is False):\n continue\n\n # Sanity check\n if isinstance(net, GraphNet) and (localized is False):\n assert torch.unique(depth['graph_net_input']['graph_idx_of_node']) == 0\n assert torch.unique(depth['graph_net_input']['graph_idx_of_edge']) == 0\n\n output = self.predict(net, depth, is_new_episode)\n if isinstance(net, GraphNet):\n # Decode the output of GraphNetEvaluator.predict() and update the predicted location\n output, last_predicted_node_name, last_predicted_behavior_id = output\n\n if has_graph_info is False:\n # We cannot compare with GT since no ground truth is provided\n continue\n\n # Use a different criterion (from training) for evaluating the GraphNet\n # Ignore the input criterion and measure accuracy instead\n gt_graph_net_dict, target_output = vel\n node_names_of_gt_subgraph = gt_graph_net_dict['node_names']\n edge_categories_of_gt_subgraph = gt_graph_net_dict['edge_categories']\n assert len(target_output['gt_node_idx']) == 1\n assert len(target_output['gt_edge_idx']) == 1\n gt_node_name = node_names_of_gt_subgraph[target_output['gt_node_idx'][0]]\n gt_behavior_category_enum = BehaviorCategory(int(edge_categories_of_gt_subgraph[target_output['gt_edge_idx'][0]]))\n gt_behavior_id = gt_behavior_category_enum.name\n\n assert self.cfg.gn_classification == 'edge'\n loss = (gt_node_name == last_predicted_node_name) and (gt_behavior_id == last_predicted_behavior_id)\n loss = float(loss)\n else:\n loss = criterion(output, vel)\n\n # Update counters\n counter += 1\n if isinstance(loss, float):\n running_loss += loss\n else:\n running_loss += loss.item()\n\n if (i + 1) % self.cfg.print_freq == 0:\n print(' evaluated %d iterations: %f' % (i + 1, running_loss / counter))\n\n # Display the prediction\n if self.cfg.visualize_results is True:\n # Modify dataset_item to include the prediction\n if isinstance(net, GraphNet):\n prediction_as_str = self.prediction2str(output, last_predicted_node_name, last_predicted_behavior_id)\n else:\n prediction_as_str = self.prediction2str(output)\n\n if 
isinstance(net, GraphNet) and (has_graph_info is True): # Make sure to visualize graph info if provided\n dataset_item = raw_batch\n dataset_item[0]['prediction_str'] = prediction_as_str\n else:\n dataset_item = batch\n dataset_item['prediction_str'] = prediction_as_str\n\n # Visualize\n to_break = self.dataset_visualizer.visualize_data_loader_item(dataset_item, use_frame_by_frame=use_single_frame)\n if to_break:\n break\n\n if is_training:\n net.train() # Set to train mode\n return running_loss / counter", "def evaluate(self, g):\n raise NotImplementedError", "def evaluate(self, valid_x, valid_y):\n results = self.model.evaluate(valid_x, valid_y, batch_size=self.BATCH_SIZE)\n print(results)\n return results[1:] + [\"NaN\"]", "def eval_model(self):\n self.decoder.eval()\n\n validation_losses_in_epoch = []\n\n with torch.no_grad():\n # validation iterations loop\n for inputs1, inputs2, targets in zip(self.train_loader_enc1, self.train_loader_enc2,\n self.train_loader_pred):\n # get encoded inputs and targets\n encoded_inputs_tensor = encode_inputs(inputs1, inputs2, self.encoder1, self.encoder2, self.device)\n targets = targets[0]\n\n # get outputs\n outputs = self.decoder(encoded_inputs_tensor)\n\n # calculate loss and add to list\n loss = self.loss_criterion(outputs, targets)\n loss_item = loss.cpu().detach().item()\n validation_losses_in_epoch.append(loss_item)\n\n # calculate average validation loss for epoch\n average_validation_loss = sum(validation_losses_in_epoch) / len(validation_losses_in_epoch)\n\n return average_validation_loss", "def evaluate(self, definition):\n self.checkModelOpen()\n calcEngine = CalcEngine.factory(self.client_session)\n return calcEngine.evaluate(definition)", "def eval(self):\n raise NotImplementedError('Must define eval function to use this base class')", "def evaluate():\n tf.compat.v1.enable_eager_execution()\n\n candidate_checkpoint = None\n uflow = uflow_main.create_uflow()\n evaluate_fn, _ = uflow_data.make_eval_function(\n FLAGS.eval_on,\n FLAGS.height,\n FLAGS.width,\n progress_bar=True,\n plot_dir=FLAGS.plot_dir,\n num_plots=50)\n\n latest_checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n while 1:\n # Wait for a new checkpoint\n while candidate_checkpoint == latest_checkpoint:\n logging.log_every_n(logging.INFO,\n 'Waiting for a new checkpoint, at %s, latest is %s',\n 20, FLAGS.checkpoint_dir, latest_checkpoint)\n time.sleep(0.5)\n candidate_checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n candidate_checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n latest_checkpoint = candidate_checkpoint\n logging.info('New checkpoint found: %s', candidate_checkpoint)\n # This forces the checkpoint manager to reexamine the checkpoint directory\n # and become aware of the new checkpoint.\n uflow.update_checkpoint_dir(FLAGS.checkpoint_dir)\n uflow.restore()\n eval_results = evaluate_fn(uflow)\n uflow_plotting.print_eval(eval_results)\n step = tf.compat.v1.train.get_global_step().numpy()\n if step >= FLAGS.num_train_steps:\n logging.info('Evaluator terminating - completed evaluation of checkpoint '\n 'from step %d', step)\n return", "def val(self):\n self.set_eval()\n try:\n inputs = self.val_iter.next()\n except StopIteration:\n self.val_iter = iter(self.val_loader)\n inputs = self.val_iter.next()\n\n with torch.no_grad():\n outputs, losses = self.process_batch(inputs)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"val\", inputs, outputs, losses)\n del inputs, outputs, 
losses\n\n self.set_train()", "def evaluate(self, x):\r\n return self.forward(x)[0]", "def run(self, x):\n \"*** YOUR CODE HERE ***\"\n return nn.DotProduct(x, self.get_weights())", "def run(self, x):\n \"*** YOUR CODE HERE ***\"\n return nn.DotProduct(x, self.get_weights())", "def eval(self):\n self.train(mode=False)", "def test(eval_loader, model, criterion, epoch, device, config, tf_writer, prepare_embeddings_fn, embedder):\n\n model.eval() # eval mode disables dropout\n\n losses = AverageMeter() # cross entropy loss\n accs = AverageMeter() # accuracies\n\n # Batches\n for _, data in enumerate(eval_loader):\n\n # Perform embedding + padding\n embeddings, labels = prepare_embeddings_fn(data, embedder, device, config)\n\n # Forward prop.\n output = model(embeddings)\n\n # Perform regularization on embedding weights -- not all models support this\n if config.model.use_regularization == \"none\":\n loss = criterion(output[\"logits\"].to(device), labels)\n elif config.model.use_regularization == \"l1\":\n # Regularization on embedding weights\n emb_weights_norm = torch.norm(model.emb_weights, p=1)\n # Loss\n loss = criterion(output[\"logits\"].to(device), labels) + config.model.regularization_lambda * emb_weights_norm # scalar\n else:\n raise NotImplementedError(\"Regularization other than 'none' or 'l1' not supported\")\n\n # Find accuracy\n _, predictions = output[\"logits\"].max(dim=1) # (n_documents)\n correct_predictions = torch.eq(predictions, labels).sum().item()\n accuracy = correct_predictions / labels.size(0)\n\n # Keep track of metrics\n losses.update(loss.item(), labels.size(0))\n accs.update(accuracy, labels.size(0))\n \n try:\n for sentence in data:\n sentence.clear_embeddings()\n except:\n pass\n\n # Print eval status\n print('Evaluation:\\t'\n 'Eval Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Eval Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(loss=losses, acc=accs), flush=True)\n\n # Log the running loss, accuracy\n tf_writer.add_scalar('test loss (avg. epoch)', losses.avg, epoch)\n tf_writer.add_scalar('test accuracy (avg. 
epoch)', accs.avg, epoch)", "def val(self):\n self.set_eval()\n try:\n inputs = self.val_iter.next()\n except StopIteration:\n self.val_iter = iter(self.val_loader)\n inputs = self.val_iter.next()\n\n with torch.no_grad():\n outputs, losses = self.process_batch(inputs)\n\n if \"depth_gt_l\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"val\", inputs, outputs, losses)\n del inputs, outputs, losses\n\n self.set_train()", "def apply_fun(network_params, tokens):\n emb_params, rnn_params, readout_params = network_params\n\n # Apply the embedding.\n inputs = emb_apply(emb_params, tokens)\n\n # Run the RNN.\n initial_states = cell.get_initial_state(rnn_params,\n batch_size=tokens.shape[0])\n return unroll.unroll_rnn(initial_states, inputs,\n functools.partial(cell.batch_apply, rnn_params),\n functools.partial(readout_apply, readout_params))", "def evaluate(self, session, *args, evaluate_data_iterator=None, **kwargs):\n\n raise NotImplementedError(\"Implement evaluate() method\")", "def evaluate(net, data_loader):\n correct = 0\n total = 0\n net.reset()\n for data in tqdm(data_loader):\n inputs, output = data\n mask, score = gate_activation(net, inputs.view(-1))\n selected_score = score[mask]\n if selected_score.size == 0:\n xo = 0.5\n else:\n xo = np.sum(selected_score) / selected_score.size\n print()\n print()\n print(\"mask\", mask)\n print(\"score\", score)\n print(\"xo\", xo)\n total += 1\n correct += ((xo > 0.5) == output[0].item())\n\n return float(correct)/total", "def eval_graph(self, x, scope, **kwargs):\n raise NotImplementedError(\"Please implement evaluation graph\")" ]
[ "0.65809673", "0.65288246", "0.63682526", "0.631708", "0.6305707", "0.63028127", "0.6297462", "0.6287825", "0.62783986", "0.627571", "0.6149087", "0.604402", "0.6024852", "0.60223997", "0.60162747", "0.6010527", "0.5999741", "0.5989639", "0.5989639", "0.5984649", "0.5969839", "0.59422195", "0.5921704", "0.59209925", "0.59166795", "0.5905076", "0.59007573", "0.58928597", "0.5887791", "0.58707345", "0.5869466", "0.58477765", "0.5844882", "0.5843658", "0.58186966", "0.5814154", "0.58129406", "0.5810027", "0.5778185", "0.5777126", "0.57679695", "0.5767632", "0.5748208", "0.5743956", "0.5736738", "0.5732054", "0.5730201", "0.57243407", "0.5710618", "0.56925756", "0.5675345", "0.5675345", "0.56747085", "0.5662048", "0.5661206", "0.5655448", "0.56546676", "0.5641236", "0.5641128", "0.56323135", "0.5628984", "0.56274813", "0.56266457", "0.5625837", "0.56236166", "0.56212014", "0.5617734", "0.5613724", "0.5613649", "0.5607878", "0.5604958", "0.56048167", "0.5597406", "0.5595991", "0.55940396", "0.55861276", "0.5585436", "0.5584433", "0.55809814", "0.5579157", "0.5579082", "0.5573386", "0.5567824", "0.5561986", "0.556077", "0.55590683", "0.5558343", "0.55550987", "0.5551309", "0.55456483", "0.554127", "0.55297565", "0.55297565", "0.55276424", "0.5527063", "0.5525506", "0.55231094", "0.5522564", "0.55219704", "0.55161905" ]
0.5704392
49
collect from a batch of VoxWave Dataset.
def collate_fn(batch):
    file = [item["file"] for item in batch]
    wave = torch.cat([item["wave"] for item in batch], dim=0)
    return {"file": file, "wave": wave}
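As a point of reference, a minimal hypothetical sketch of how this collate_fn is typically wired into a PyTorch DataLoader; the WaveDataset stub below is an illustrative assumption, not part of the dataset row.

import torch
from torch.utils.data import Dataset, DataLoader

class WaveDataset(Dataset):
    # Toy dataset yielding the {"file", "wave"} dicts that collate_fn expects.
    def __init__(self, files, num_eval=6, length=48000):
        self.files = files
        self.num_eval = num_eval
        self.length = length

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        # A real implementation would load and crop audio from disk; random data here.
        wave = torch.randn(self.num_eval, self.length)
        return {"file": self.files[idx], "wave": wave}

loader = DataLoader(WaveDataset(["a.wav", "b.wav", "c.wav"]),
                    batch_size=2, collate_fn=collate_fn, shuffle=False)
batch = next(iter(loader))
# batch["file"] -> list of 2 file names
# batch["wave"] -> tensor of shape (2 * num_eval, length)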
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data(self, train=True, batch_size=2):\n if train:\n elements = self.prepare_batch(self.training_albums)\n else:\n elements = self.prepare_batch(self.validation_albums)\n\n while len(elements) > 0:\n # Collect the batch\n batch = []\n for _ in range(min(batch_size, len(elements))):\n batch.append(elements.pop())\n\n # Get same sequence size for all elements of the batch\n albums, labels = self.batchify(batch)\n yield albums, labels", "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def extract(self):\n\n # print some infos about data\n print(\"\\n--extract batches from data:\\ntrain: {}\\nval: {}\\ntest: {}\\n\".format(self.data[0]['x'].shape, self.data[1]['x'].shape, self.data[2]['x'].shape))\n\n # create batches\n self.x_train, self.y_train, _ = self.create_batches(self.data[0], batch_size=self.batch_size)\n self.x_val, self.y_val, _ = self.create_batches(self.data[1], batch_size=self.batch_size_eval)\n self.x_test, self.y_test, _ = self.create_batches(self.data[2], batch_size=self.batch_size_eval)\n\n # my data\n if len(self.mfcc_data_files) == 4:\n self.x_my, self.y_my, self.z_my = self.create_batches(self.data[3], batch_size=1)", "def split_and_load(batch_data, num_gpus):\n return [batch_data[i].data[0] for i in range(num_gpus)], \\\n [batch_data[i].label[0].as_in_context(mx.gpu(i)) for i in range(num_gpus)]", "def collect_fn(batch):\r\n # max_detection = max(list(map(lambda x: len(x[5]), batch)))\r\n max_detection = max(list(map(lambda x: len(x), batch)))\r\n for i in range(len(batch)):\r\n batch[i] = list(batch[i]) # because the element in the batch is a tuple\r\n dummy = torch.zeros((1,5), dtype=batch[i][5].dtype)\r\n temp = batch[i][5]\r\n # make the detection to the same length in order to stack the\r\n while temp.size(0) < max_detection:\r\n temp = torch.cat((temp, dummy))\r\n batch[i][5] = temp\r\n \r\n return default_collate(batch)", "def _get_batch_data(self, batch):\n try:\n encoders = [ encoder for encoder in self._data_encoder ]\n except:\n encoders = (self._data_encoder,)\n\n try:\n data_batches = [ encoder.transform_batch(rec for _, rec in batch.iterrows())\n for encoder in encoders ]\n except AttributeError:\n data_batches = [\n [ self._get_data(record, encoder) for _, record in batch.iterrows() ]\n for encoder in encoders ]\n\n try:\n batches = [ np.array(encoder.finalize_batch(batch))\n for encoder, batch in zip(encoders, data_batches)]\n except AttributeError:\n batches = [ np.array(batch) for batch in data_batches ]\n\n return batches if len(batches) > 1 else batches[0]", "def data_gen(\n v: int, batch: int, nbatches: int, device: torch.device = torch.device(\"cpu\")\n) -> Iterator[Batch]: # TODO bad name\n for i in range(nbatches):\n data = np.random.randint(1, v, size=(batch, 10))\n data[:, 0] = 1\n src: LongTensorType = torch.from_numpy(data)\n tgt: LongTensorType = torch.from_numpy(data)\n src, tgt = src.to(device), tgt.to(device)\n yield Batch(src, tgt, 0)", "def _get_batch_data(batch, ctx):\n data, label = batch\n return (mx.gluon.utils.split_and_load(data, ctx),\n mx.gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def collect(self):\r\n for idx_ds, ds in enumerate(self.datasets):\r\n # print \"collect() using ds = \", repr(ds)\r\n reader_rows = ds.dict_reader()\r\n for row in reader_rows:\r\n # print \"Collect: idx_ds=%d\" % idx_ds\r\n for summary in self.summaries:\r\n #Derive coords from the row for this summary\r\n coords=[]\r\n for sum_col in summary.columns:\r\n level = str(row[sum_col.name])\r\n if level is 
not None and level != \"None\": \r\n if level.find('.') != -1:\r\n # May be a float value with .0 ending to trim\r\n try:\r\n # If value is parsable as a float, and it \r\n # is an integer, represent it as an integer.\r\n flevel = float(level)\r\n # Strip a final .0 from the string.\r\n level = (\r\n str(int(flevel)) if flevel == int(flevel)\r\n else str(level))\r\n except:\r\n # Not a float, OK.\r\n pass\r\n else:\r\n level = \"\"\r\n coords.append(level)\r\n #print \"coords:\", repr(coords)\r\n #Register row data into this summary.\r\n cell = summary.cell(coords)\r\n #Future, along with ds_index, could also pass along \r\n # row's ordinal column values.\r\n # Note to self: rename accrue_row to accrue_row() \r\n # when get into eclipse env\r\n cell.entry.accrue_row(idx_ds)", "def create_features_from_vids():\n\n dtype = get_dtype()\n feature_extractor = FeatureExtractor()\n feature_extractor.eval()\n feature_extractor.type(dtype)\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n dataset = StrokesDataset('../dataset/my_dataset/patches/labels.csv', '../dataset/my_dataset/patches/',\n transform=transforms.Compose([ToTensor(), normalize]), use_features=False)\n batch_size = 32\n count = 0\n for vid in dataset:\n count += 1\n frames = vid['frames']\n print(len(frames))\n\n features = []\n for batch in frames.split(batch_size):\n batch = batch.type(dtype)\n with torch.no_grad():\n # forward pass\n batch_features = feature_extractor(batch)\n features.append(batch_features.cpu().numpy())\n\n df = pd.DataFrame(np.concatenate(features, axis=0))\n\n outfile_path = os.path.join('../dataset/my_dataset/patches/', os.path.splitext(vid['vid_name'])[0] + '.csv')\n df.to_csv(outfile_path, index=False)\n\n print(count)", "def process(self, data_batch: Sequence[Dict],\n data_samples: Sequence[Dict]) -> None:\n for data_sample in data_samples:\n pred_labels = data_sample.get('pred_instances').get(self.key).cpu()\n gt_labels = data_sample.get('gt_instances').get(self.key).cpu()\n\n result = dict(\n pred_labels=pred_labels.flatten(),\n gt_labels=gt_labels.flatten())\n self.results.append(result)", "def split_and_load(batch, ctx_list):\n new_batch = []\n for i, data in enumerate(batch):\n if isinstance(data, (list, tuple)):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n else:\n new_data = [data.as_in_context(ctx_list[0])]\n new_batch.append(new_data)\n return new_batch", "def __iter__(self):\n batch = []\n for sample in self.dataset:\n batch.append(sample)\n if len(batch) == self.size:\n yield self.transform(batch)\n batch = []\n if batch:\n # the last batch may be less then batch size.\n yield self.transform(batch)", "def _batchify(self, data_containers: Dict, batch_size):\n\n X = Variable(torch.LongTensor(data_containers['X'])).to(self.device)\n Y = Variable(torch.FloatTensor(data_containers['Y'])).to(self.device)\n\n data_size = X.size()[0]\n num_batches = data_size // batch_size\n\n return [\n (X[bi * batch_size: (bi + 1) * batch_size],\n Y[bi * batch_size: (bi + 1) * batch_size].unsqueeze(1))\n for bi in range(num_batches + 1)\n ]", "def split_and_load(batch, ctx_list):\n num_ctx = len(ctx_list)\n new_batch = []\n for i, data in enumerate(batch):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n new_batch.append(new_data)\n return new_batch", "def split_and_load(batch, ctx_list):\n num_ctx = len(ctx_list)\n new_batch = []\n for i, data in enumerate(batch):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, 
ctx_list)]\n new_batch.append(new_data)\n return new_batch", "def _data_generation(self, batch_data):\n # Initialization\n batch_x = []\n batch_y = defaultdict(list)\n\n for ind, item_data in batch_data.iterrows():\n img_path = os.path.join(self.img_dir, \"images\", \"rgb\", item_data[\"name\"])\n img = cv2.imread(img_path)\n try:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n except Exception as error:\n print(img_path)\n print(error)\n not_valid_mask = self.read_masks_borders(item_data[\"name\"])\n img[not_valid_mask] = 0\n\n # getmasks\n targets = np.zeros((img.shape[0], img.shape[1], len(self.classes)))\n for i, c in enumerate(self.classes):\n mask_path = os.path.join(self.img_dir, \"labels\", c, item_data[\"name\"])\n mask = cv2.imread(\n mask_path.replace(\".jpg\", \".png\"), cv2.IMREAD_GRAYSCALE\n )\n mask[not_valid_mask[:, :, 0]] = 0\n mask = mask > 0\n targets[:, :, i] = mask\n\n res = self.reshape_func(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n if self.do_aug:\n res = self.aug(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n\n for i, c in enumerate(self.classes):\n batch_y[c].append(targets[:, :, i])\n\n batch_x.append(img)\n\n batch_x = np.array(batch_x, np.float32)\n batch_y = {k: np.array(v, np.float32) for k, v in batch_y.items()}\n batch_y = {k: np.expand_dims(v, axis=-1) for k, v in batch_y.items()}\n\n return (\n imagenet_utils.preprocess_input(batch_x, \"channels_last\", mode=\"tf\"),\n batch_y\n )", "def batch_data(cls, train_data, train_labels, batch_size):\n for batch in range(int(np.ceil(train_data.shape[0] / batch_size))):\n start = batch_size * batch\n end = start + batch_size\n if end > train_data.shape[0]:\n yield batch, (train_data[start:train_data.shape[0]], \\\n train_labels[start:train_data.shape[0]])\n else:\n yield batch, (train_data[start:end], \\\n train_labels[start:end])", "def __getitem__(self, index):\r\n\r\n # Generate indexes of the batch\r\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\r\n\r\n # Find list of IDs\r\n list_ids_temp = [self.list_IDs[k] for k in indexes]\r\n\r\n # Calls function to load batch of data into memory\r\n X, y = self.__data_generation(list_ids_temp)\r\n\r\n return X, y", "def _get_batch(self):\n # index = self._index[self._current]\n # im_path = self._imdb.image_path_from_index(0)\n # im_path = 'data/demo/dog.jpg'\n # with open(im_path, 'rb') as fp:\n # img_content = fp.read()\n\n batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1]))\n batch_label = [] \n global imgi\n # img = mx.nd.array(imgi)\n # imgr = mx.img.imdecode(img_content)\n data = self._data_augmentation(imgi)\n batch_data[0] = data\n \n self._data = {'data': batch_data}\n self._label = {'label': None}", "def prepareDataBatches(self, traindata, trainlabel):\n index = np.random.permutation(len(traindata))\n traindata = traindata[index]\n trainlabel = trainlabel[index]\n split_no = int(len(traindata) / self.batchSize)\n return zip(np.split(traindata[:split_no*self.batchSize], split_no), np.split(trainlabel[:split_no*self.batchSize], split_no))", "def _proc_dataset(d):\n # merge 2dseq complex frame group if present\n if d.is_complex and d.type == '2dseq':\n d = FrameGroupMerger().merge(d, 'FG_COMPLEX')\n\n # prepare the data array\n if d.is_svs:\n data = _prep_data_svs(d)\n elif d.is_mrsi:\n data = _prep_data_mrsi(d)\n else:\n data = d.data\n\n # get properties\n properties = d.to_dict()\n\n # some Bruker datasets do not have affine property\n if d.type 
== 'fid': if not 'affine' in properties: properties.update({'affine':np.identity(4)})\n \n yield data, properties", "def _batch(self, X, y, batch_size=16):\n X, y = self._shuffle(X, y) # shuffle the data\n self.batches = []\n idx = 0\n while idx < len(X):\n batch = (X[idx:idx+batch_size], y[idx:idx+batch_size])\n self.batches.append(batch)\n idx += batch_size", "def __data_generation(self, rows):\n samples = np.zeros((rows, self.image_width, self.image_height, self.image_depth))\n targets = np.zeros((rows, self.image_width, self.image_height, self.num_classes))\n for j in range(rows):\n for row1, row2 in zip(self.reader1, self.reader2):\n array_row1 = np.array(row1, dtype=np.float)\n samples[j,:,:,:] = preprocess_feature(array_row1,\n self.image_width, self.image_height, self.image_depth)\n try:\n next(self.reader1)\n except StopIteration:\n print(\"CSV iteration end for feature. Calling 'break'.\")\n break\n\n array_row2 = np.array(row2, dtype=np.int)\n targets[j,:,:,:] = preprocess_label(array_row2,\n self.image_width, self.image_height, self.num_classes)\n try:\n next(self.reader2)\n except StopIteration:\n print(\"CSV iteration end for label. Calling 'break'.\")\n break\n\n return samples, targets", "def train_datas(self, batch_size):\r\n if not isinstance(batch_size, int):\r\n raise ValueError('In Dataset, batch_size should be int, get '\r\n '{}'.format(type(batch_size)))\r\n if batch_size <= 0:\r\n raise ValueError('In Dataset, batch_size should larger equal to '\r\n '1, get {}'.format(batch_size))\r\n \r\n indices = list(range(self.size))\r\n np.random.shuffle(indices)\r\n\r\n epoch_size = self.size // batch_size * batch_size\r\n self._train_datas = self._train_datas[indices][:epoch_size] # [epoch_size, ...]\r\n self._train_labels = self._train_labels[indices][:epoch_size] # [epoch_size, ...]\r\n \r\n datas = []\r\n for i in range(self.size // batch_size):\r\n # for label, we have box and landmark which is 0.\r\n datas.append([self._train_datas[i*batch_size:(i+1)*batch_size], \r\n self._train_labels[i*batch_size:(i+1)*batch_size]])\r\n return datas", "def train_data() -> Iterator[Tuple[Label, ChanneledImage]]:\n return zip(*get_data(TRAIN_FILES, 60000))", "def _extract_batch(self, data, batch_size):\n\n batch_size = batch_size or BATCH_SIZE\n\n batch = []\n try:\n for i in range(batch_size):\n batch.append(data.next())\n except StopIteration:\n pass\n\n return batch", "def process(dataset, f):\n logger.info('processing dataset ({0})'.format(len(dataset.samples)))\n for sample in dataset.samples:\n sample.proc = f(sample.image)", "def fetch_data(data, count, idx_batch, vocab_size):\r\n batch_size = len(idx_batch)\r\n data_batch = np.zeros((batch_size, vocab_size))\r\n count_batch = []\r\n mask = np.zeros(batch_size)\r\n indices = []\r\n values = []\r\n for i, doc_id in enumerate(idx_batch):\r\n if doc_id != -1:\r\n for word_id, freq in data[doc_id].items():\r\n data_batch[i, word_id] = freq\r\n count_batch.append(count[doc_id])\r\n mask[i]=1.0\r\n else:\r\n count_batch.append(0)\r\n return data_batch, count_batch, mask", "def collect_atlas(cls):\n yield from cls.collect()", "def next_simple_dataset(dataset, batch_size: int, datatype):\n while True:\n x_batch = []\n y_batch = []\n for i in range(batch_size):\n try:\n x, y, data_unit, index = create_xy(dataset, datatype)\n # x = normalize(x)\n x_batch.append(x)\n y_batch.append(y)\n except StopIteration:\n break\n x_batch, y_batch = np.array(x_batch), np.array(y_batch)\n if datatype != DataType.test:\n x_batch = 
SEQ_CVXTZ.augment_images(x_batch).astype(\"float32\")\n x_batch = np.array([normalize(x) for x in x_batch])\n # org_shape = x_batch.shape\n # org_width = x_batch.shape[1]\n # corner = int((org_width - ROI_IMAGE_SIZE) // 2)\n # print(f\"0: org_shape:{org_shape} x_batch:{x_batch.shape} corner:{corner}\")\n # x_batch = x_batch[:, corner:(org_width - corner), corner:(org_width - corner), :]\n # resized_x_batch = []\n # for x in x_batch:\n # img = Image.fromarray(np.uint8(x))\n # img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.LANCZOS)\n # resized_x_batch.append(normalize(np.array(img)))\n # print(f\"1: org_shape:{org_shape} corner:{corner} x_batch:{x_batch.shape}\")\n # yield np.array(resized_x_batch), y_batch\n yield np.array(x_batch), y_batch", "def fetch_data(data, count, idx_batch, vocab_size):\r\n batch_size = len(idx_batch)\r\n data_batch = np.zeros((batch_size, vocab_size))\r\n count_batch = []\r\n mask = np.zeros(batch_size)\r\n for i, doc_id in enumerate(idx_batch):\r\n if doc_id != -1:\r\n for word_id, freq in data[doc_id].items():\r\n data_batch[i, word_id] = freq\r\n count_batch.append(count[doc_id])\r\n mask[i]=1.0\r\n else:\r\n count_batch.append(0)\r\n return data_batch, count_batch, mask", "def my_detection_collate(batch):\n targets_1 = []\n imgs = []\n for sample in batch:\n # each sample is the result of one query on the dataset object\n imgs.append(sample[0])\n targets_1.append(torch.FloatTensor(sample[1]))\n return torch.stack(imgs, 0), targets_1", "def batch_iter(input_data,batch_size):\r\n batch_ids,batch_mask,batch_segment,batch_label=[],[],[],[]\r\n for features in input_data:\r\n if len(batch_ids) == batch_size:\r\n yield batch_ids,batch_mask,batch_segment,batch_label\r\n batch_ids, batch_mask, batch_segment, batch_label = [], [], [], []\r\n\r\n batch_ids.append(features['input_ids'])\r\n batch_mask.append(features['input_mask'])\r\n batch_segment.append(features['segment_ids'])\r\n batch_label.append(features['label_ids'])\r\n\r\n if len(batch_ids) != 0:\r\n yield batch_ids, batch_mask, batch_segment, batch_label", "def collect_fn_local(batch):\r\n max_detection = max(list(map(lambda x: len(x[4]), batch)))\r\n for i in range(len(batch)):\r\n batch[i] = list(batch[i]) # because the element in the batch is a tuple\r\n dummy = torch.zeros((1,128,64), dtype=batch[i][4][0].dtype)\r\n temp = batch[i][4]\r\n # make the detection to the same length in order to stack the\r\n while temp.size(0) < max_detection:\r\n # while len(temp) < max_detection:\r\n temp = torch.cat((temp, dummy))\r\n # temp.append(dummy)\r\n batch[i][4] = temp\r\n \r\n return default_collate(batch)", "def batch(self, data, size):\n\n return [data[x : x + size] for x in range(0, len(data), size)]", "def set_batch_data():\r\n if not os.path.exists(filepath):\r\n download_data()\r\n for n in range(0,6):\r\n d = read(filepath + flist[n])\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, trts = {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n trts['x'], trts['y'] = d['data'], d['labels']\r\n trtsflag = ['train', 'train', 'train', 'train', 'train', 'test']\r\n\r\n data['flag'] = trtsflag[n]\r\n data[trtsflag[n]] = trts\r\n save_pkl(data, savename=flist[n]+'.pkl')", "def batch_data(images: np.ndarray, targets: np.ndarray, batch_size: int=100) \\\n -> 
Iterable[Tuple[np.ndarray, np.ndarray]]:", "def __data_generation(self, batch_data):\n X = np.zeros((self.batch_size, self.num_features), dtype=float)\n y = np.zeros((self.batch_size, self.num_outputs), dtype=float)\n\n for i, sample in batch_data.iterrows():\n # Get lat/long of pickup and dropoff locations\n PULocation = self.taxizone_data.loc[sample['PULocationID']].centroids\n PULocationLong, PULocationLat = PULocation.x, PULocation.y\n DOLocation = self.taxizone_data.loc[sample['DOLocationID']].centroids\n DOLocationLong, DOLocationLat = DOLocation.x, DOLocation.y\n\n # Get month date, day of week and hours/mins for pickup\n PUDateTime = datetime.strptime(sample.tpep_pickup_datetime, '%Y-%m-%d %H:%M:%S')\n PUDate = PUDateTime.strftime('%Y-%m-%d')\n PUYear, PUMonth, PUMonthDate = PUDate.split('-')\n # TODO - Add this to pre-processing of trip data! Some random months in the data!!\n if PUYear != '2018' or PUMonth != '06':\n continue\n PUDayOfWeek = PUDateTime.weekday()\n PUTimeHour, PUTimeMinute = datetime.strptime(\n sample.tpep_pickup_datetime, '%Y-%m-%d %H:%M:%S'\n ).strftime('%H:%M').split(':')\n\n # Get precipitation for that day\n Precipitation = self.weather_data[self.weather_data['DATE'] == PUDate]['PRCP'].values[0]\n\n X[i] = np.concatenate((np.array([\n\n PULocationLat,\n PULocationLong,\n DOLocationLat,\n DOLocationLong,\n abs((PULocationLat - DOLocationLat) ** 2 + abs(PULocationLong - DOLocationLong) ** 2) ** 0.5,\n Precipitation\n ]),\n to_categorical(PUDayOfWeek, 7),\n to_categorical(PUMonthDate, 31),\n to_categorical(PUTimeHour, 24)\n ))\n\n y[i] = [sample['duration']] if self.generator_type == 'duration' \\\n else [sample['total_amount'] - sample['tip_amount']]\n\n return X, y", "def __getitem__(self, index):\n # Generate indexes of the batch\n rows = self.metadata_dataframe.iloc[index * self.batch_size:(index + 1) * self.batch_size]\n names = rows['Name']\n\n rng = range(index * self.batch_size, (index + 1) * self.batch_size)\n img_files_temp = [names[k] for k in rng]\n # create batch item list\n img_batch_list = []\n meta_batch_list = []\n y_batch_list = []\n for img_file in img_files_temp:\n # Generate data\n print(\"IMAGE FILE:(\")\n print(img_file)\n img, meta, y = self.__data_generation(img_file)\n img_batch_list.append(img)\n meta_batch_list.append(meta)\n y_batch_list.append(y)\n\n # batch_inputs = (img_batch_list, meta_batch_list)\n # return batch_inputs #, y_batch_list\n return [np.array(img),np.array(meta_batch_list)], np.array(y_batch_list)", "def ingest_many(self, data):\n raise NotImplementedError()", "def data_iter(data, vocab, batch_size, shuffle=True, cuda=False):\n\n buckets = defaultdict(list)\n for pair in data:\n src_sent = pair[0]\n buckets[len(src_sent)].append(pair)\n\n batched_data = []\n for src_len in buckets:\n tuples = buckets[src_len]\n if shuffle: np.random.shuffle(tuples)\n batched_data.extend(list(HNCMDataLoader.batch_slice(tuples, batch_size)))\n\n if shuffle:\n np.random.shuffle(batched_data)\n for src_sents, trg_sents, fact_sents in batched_data:\n num_trg_word = sum(len(s[:-1]) for s in trg_sents)\n src_lengths = [len(s) for s in src_sents]\n src_seqs_var = to_input_var(src_sents, vocab.src, cuda)\n trg_seqs_var = to_input_var(trg_sents, vocab.trg, cuda)\n fact_lengths = [[len (s) for s in fact_sent] for fact_sent in fact_sents]\n fact_seqs_var = to_input_var_2d(fact_sents, vocab.src, cuda)\n\n yield {\n 'src_seq': src_seqs_var, 'src_lengths': src_lengths,\n 'fact_seq': fact_seqs_var, 'fact_lengths': fact_lengths,\n 'trg_seq': 
trg_seqs_var[:, :-1],\n 'target': trg_seqs_var[:, 1:],\n 'num_trg_word': num_trg_word, 'num_trg_seq': len(trg_sents)\n }", "def _run(self, index_list: List[np.ndarray]) -> Iterator[XData]:\n da_it = task_list(index_list, IdReader(), self.worker, self.nworkers)\n xdata_it = (dataarrays_to_xdata(d, self.meta) for d in da_it)\n return xdata_it", "def _dataset():\n dataset = tf_record_dataset(DatasetName.SIGNUM, DatasetType.TRAIN)\n dataset = dataset.batch(1)\n dataset = dataset.map(transform_for_prediction)\n dataset = dataset.unbatch()\n dataset = dataset.filter(lambda frames, label, signer: tf.math.equal(label, 420) and tf.math.equal(signer, 1))\n dataset = dataset.batch(1)\n return dataset.take(1)", "def __data_generation(self, list_IDs_temp):\n # Initialization\n if self.mirror:\n X = np.empty(\n (self.batch_size, *self.dim_in, self.n_channels_in),\n dtype=\"uint8\",\n ) \n else:\n X = np.empty(\n (self.batch_size * len(self.camnames[0]), *self.dim_in, self.n_channels_in),\n dtype=\"uint8\",\n )\n\n # We'll need to transpose this later such that channels are last,\n # but initializaing the array this ways gives us\n # more flexibility in terms of user-defined array sizes\\\n if self.labelmode == \"prob\":\n y = np.empty(\n (\n self.batch_size * len(self.camnames[0]),\n self.n_channels_out,\n *self.dim_out,\n ),\n dtype=\"float32\",\n )\n else:\n # Just return the targets, without making a meshgrid later\n y = np.empty(\n (\n self.batch_size * len(self.camnames[0]),\n self.n_channels_out,\n len(self.dim_out),\n ),\n dtype=\"float32\",\n )\n\n # Generate data\n cnt = 0\n for i, ID in enumerate(list_IDs_temp):\n if \"_\" in ID:\n experimentID = int(ID.split(\"_\")[0])\n else:\n # Then we only have one experiment\n experimentID = 0\n for _ci, camname in enumerate(self.camnames[experimentID]):\n # Store sample\n # TODO(Refactor): This section is tricky to read\n\n if not self.mirror or _ci == 0:\n if self.immode == \"video\":\n X[cnt] = self.load_vid_frame(\n self.labels[ID][\"frames\"][camname],\n camname,\n self.preload,\n self.extension,\n )[\n self.crop_height[0] : self.crop_height[1],\n self.crop_width[0] : self.crop_width[1],\n ]\n elif self.immode == \"tif\":\n X[cnt] = self.load_tif_frame(\n self.labels[ID][\"frames\"][camname], camname\n )[\n self.crop_height[0] : self.crop_height[1],\n self.crop_width[0] : self.crop_width[1],\n ]\n else:\n raise Exception(\"Not a valid image reading mode\")\n\n # Labels will now be the pixel positions of each joint.\n # Here, we convert them to\n # probability maps with a numpy meshgrid operation\n this_y = np.round(self.labels[ID][\"data\"][camname])\n if self.immode == \"video\":\n this_y[0, :] = this_y[0, :] - self.crop_width[0]\n this_y[1, :] = this_y[1, :] - self.crop_height[0]\n else:\n raise Exception(\n \"Unsupported image format. 
Needs to be video files.\"\n )\n\n # For 2D, this_y should be size (2, 20)\n if this_y.shape[1] != self.n_channels_out:\n # TODO(shape_exception):This should probably be its own\n # class that inherits from base exception\n raise Exception(_EXEP_MSG)\n\n if self.labelmode == \"prob\":\n # Only do this if we actually need the labels --\n # this is too slow otherwise\n (x_coord, y_coord) = np.meshgrid(\n np.arange(self.dim_out[1]), np.arange(self.dim_out[0])\n )\n for j in range(self.n_channels_out):\n # I tested a version of this with numpy broadcasting,\n # and looping was ~100ms seconds faster for making\n # 20 maps\n # In the future, a shortcut might be to \"stamp\" a\n # truncated Gaussian pdf onto the images, centered\n # at the peak\n y[cnt, j] = np.exp(\n -(\n (y_coord - this_y[1, j]) ** 2\n + (x_coord - this_y[0, j]) ** 2\n )\n / (2 * self.out_scale ** 2)\n )\n else:\n y[cnt] = this_y.T\n\n cnt = cnt + 1\n\n # Move channels last\n if self.labelmode == \"prob\":\n y = np.transpose(y, [0, 2, 3, 1])\n\n if self.mirror:\n # separate the batches from the cameras, and use the cameras as the numebr of channels \n # to make a single-shot multi-target prediction from a single image\n y = np.reshape(y, (self.batch_size, len(self.camnames[0]), y.shape[1], y.shape[2]))\n y = np.transpose(y, [0, 2, 3, 1])\n else:\n # One less dimension when not training with probability map targets\n y = np.transpose(y, [0, 2, 1])\n\n if self.downsample > 1:\n X = processing.downsample_batch(X, fac=self.downsample, method=self.dsmode)\n if self.labelmode == \"prob\":\n y = processing.downsample_batch(\n y, fac=self.downsample, method=self.dsmode\n )\n y /= np.max(np.max(y, axis=1), axis=1)[:, np.newaxis, np.newaxis, :]\n\n if self.mono and self.n_channels_in == 3:\n # Go from 3 to 1 channel using RGB conversion. 
This will also\n # work fine if there are just 3 channel grayscale\n X = X[:, :, :, 0]*0.2125 + \\\n X[:, :, :, 1]*0.7154 + \\\n X[:, :, :, 2]*0.0721\n\n X = X[:, :, :, np.newaxis]\n\n if self.mono:\n # Just subtract the mean imagent BGR value, which is as close as we\n # get to vgg19 normalization\n X -= 114.67\n else:\n X = pp_vgg19(X)\n return X, y", "def get_data(self, t_img_path, v_img_path, t_label_path, v_label_path):\n train_label_names = tf.constant(sorted(os.path.join(t_label_path, name) for name in os.listdir(t_label_path)))\n val_label_names = tf.constant(sorted(os.path.join(v_label_path, name) for name in os.listdir(v_label_path)))\n train_image_names = tf.constant(sorted(os.path.join(t_img_path, name) for name in os.listdir(t_img_path)))\n val_image_names = tf.constant(sorted(os.path.join(v_img_path, name) for name in os.listdir(v_img_path)))\n\n training_dataset = tf.data.Dataset.from_tensor_slices((train_image_names, train_label_names))\n training_dataset = training_dataset.shuffle(buffer_size=50000)\n training_dataset = training_dataset.map(self.dataset_resize_images, num_parallel_calls=4)\n training_dataset = training_dataset.map(\n lambda filename, label: tuple(tf.py_func(self.dataset_convert_labels, [filename, label], [tf.float32, tf.float32], stateful=False)),\n num_parallel_calls=4)\n training_dataset = training_dataset.prefetch(self.batch_size)\n training_dataset = training_dataset.batch(self.batch_size)\n training_dataset = training_dataset.repeat()\n\n val_dataset = tf.data.Dataset.from_tensor_slices((val_image_names, val_label_names))\n val_dataset = val_dataset.shuffle(buffer_size=5000)\n val_dataset = val_dataset.map(self.dataset_resize_images, num_parallel_calls=4)\n val_dataset = val_dataset.map(\n lambda filename, label: tuple(tf.py_func(self.dataset_convert_labels, [filename, label], [tf.float32, tf.float32], stateful=False)),\n num_parallel_calls=4)\n val_dataset = val_dataset.prefetch(self.batch_size)\n val_dataset = val_dataset.batch(self.batch_size)\n val_dataset = val_dataset.repeat()\n\n handle = tf.placeholder(tf.string, shape=[])\n iterator = tf.data.Iterator.from_string_handle(handle, training_dataset.output_types, training_dataset.output_shapes)\n images, labels = iterator.get_next()\n\n training_iterator = training_dataset.make_one_shot_iterator()\n validation_iterator = val_dataset.make_one_shot_iterator()\n\n return handle, training_iterator, validation_iterator, images, labels", "def processData(data):\n ids, instances, labels = [], [], []\n for i in data:\n idField = int(i[0])\n instance = i[1:-1]\n label = i[-1]\n ids.append(idField)\n instances.append(instance)\n labels.append(label)\n\n ids = np.array(ids)\n instances = np.array(instances)\n labels = np.array(labels)\n\n return (ids, instances, labels)", "def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r", "def train(self, batch):\n pass", "def prepare_data(self, context_size, model_name):\n self.context_size = context_size\n data_x = []\n data_y = []\n oob = self.word2idx['OOB']\n\n for item in self.docs:\n data = [oob] * context_size + self.doc2token(item) + [oob] * context_size #padding\n for i in range(context_size, len(data) - 
context_size):\n data_x.append(data[i - context_size: i] + data[i + 1: i + context_size + 1])\n data_y.append(data[i])\n \n if model_name.lower() == 'skipgram':\n data_x, data_y = data_y, data_x\n self.data_x = Variable(torch.LongTensor(data_x))\n self.data_y = Variable(torch.LongTensor(data_y))\n logging.info(f'data preprocessed, data shape: {self.data_x.shape}, {self.data_y.shape}')", "def batches(data, batch_size) -> list:\n rv = []\n for idx, line in enumerate(data):\n if idx != 0 and idx % batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv", "def _process(self):\n export_collect_data(self.kwargs[\"collect\"])", "def __getitem__(self, index):\n # Generate indexes of the batch\n batch_data = self.dataset_df[index * self.batch_size: (index + 1) * self.batch_size]\n\n # Generate data\n X, y = self._data_generation(batch_data)\n\n return X, y", "def train_datas_debug(self, batch_size):\r\n if not isinstance(batch_size, int):\r\n raise ValueError('In Dataset, batch_size should be int, get '\r\n '{}'.format(type(batch_size)))\r\n if batch_size <= 0:\r\n raise ValueError('In Dataset, batch_size should larger equal to '\r\n '1, get {}'.format(batch_size))\r\n \r\n indices = list(range(batch_size))\r\n \r\n datas = []\r\n # for label, we have box and landmark which is 0.\r\n datas.append([self._train_datas[:batch_size], \r\n self._train_labels[:batch_size]])\r\n return datas", "def evaluate_batch(self, pipelines):", "def split_to_batches(self, train_data, batch_size):\n num_of_training_examples = len(train_data)\n for i in range(0, num_of_training_examples, batch_size):\n x, y = zip(*train_data[i: i+batch_size])\n yield np.vstack(x), np.vstack(y)", "def collect_data(self):\n categories = []\n list_of_feature_lists = []\n feature_sets = set()\n with open(self.csv_path, \"r\") as f:\n reader = csv.DictReader(f)\n # collecting some stats for FDR calculation:\n self.PSM_count = 0\n self.decoy_count = 0\n\n if self[\"dump_svm_matrix\"]:\n self.init_svm_matrix_dump()\n additional_matrix_info = []\n\n for i, row in enumerate(\n sorted(\n reader,\n reverse=self[\"bigger_scores_better\"],\n key=lambda d: float(d[self.col_for_sorting]),\n )\n ):\n\n features = self.row_to_features(row)\n\n if tuple(features) in feature_sets:\n continue\n feature_sets.add(tuple(features))\n\n category, psm_FDR = self.get_psm_category(row)\n\n list_of_feature_lists.append(features)\n categories.append(category)\n\n if self[\"dump_svm_matrix\"]:\n label = -1 if row_is_decoy(row) else 1\n sequence = \"{0}.{1}#{2}.{3}\".format(\n row[\"Sequence Pre AA\"].strip(),\n row[\"Sequence\"].strip(),\n row[\"Modifications\"].strip(),\n row[\"Sequence Post AA\"].strip(),\n )\n additional_matrix_info.append(\n {\n \"psm_id\": row[\"Spectrum Title\"].strip(),\n \"label\": label,\n \"scannr\": row[\"Spectrum Title\"].strip().split(\".\")[-2],\n \"peptide\": sequence,\n \"proteins\": self.parse_protein_ids(row[\"Protein ID\"]),\n }\n )\n\n if i % 1000 == 0:\n score_val = float(row[self.col_for_sorting])\n msg = (\n \"Generating feature matrix from input csv \"\n \"(line ~{0}) with score {1} and FDR \"\n \"{2}\".format(i, score_val, psm_FDR)\n )\n print(msg, end=\"\\r\")\n\n # All data points are collected in one big matrix, to make standardization possible\n print(\"\\nConverting feature matrix to NumPy array...\")\n X_raw = np.array(list_of_feature_lists, dtype=float)\n\n print(\"Replacing empty/NaN values with the mean of each column...\")\n self.nan_replacer = Imputer()\n self.nan_replacer.fit(X_raw)\n X_raw = 
self.nan_replacer.transform(X_raw)\n # Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance\n print(\"Standardizing input matrix...\")\n self.scaler = SCALER.fit(X_raw)\n self.X = self.scaler.transform(X_raw)\n self.categories = np.array(categories)\n print()\n\n if self[\"dump_svm_matrix\"]:\n print(\"Dumping SVM matrix to\", self[\"dump_svm_matrix\"])\n\n for i, matrix_row in enumerate(self.X):\n matrix_row_info = additional_matrix_info[i]\n self.dump_svm_matrix_row(\n row=list(matrix_row),\n psm_id=matrix_row_info[\"psm_id\"],\n label=matrix_row_info[\"label\"],\n scannr=matrix_row_info[\"scannr\"],\n peptide=matrix_row_info[\"peptide\"],\n proteins=matrix_row_info[\"proteins\"],\n )\n\n print(\"Dumped SVM matrix to\", self[\"dump_svm_matrix\"])\n return", "def datasubset(loader, start, count, batch_size):\n # Note: start is the start index of batch, not image\n smaller_dataset = []\n end_idx = count / batch_size\n for batch_idx, (orig_images, labels) in enumerate(loader):\n if start <= batch_idx < end_idx:\n smaller_dataset.append((orig_images, labels))\n if batch_idx > end_idx:\n break\n return smaller_dataset", "def __getitem__(self, index):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n\n return X, y", "def __getitem__(self, index):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n\n return X, y", "def _gather_data(self):\n for data in self._collection:\n label = data.label\n label = disambiguate(label, self._data)\n self._data[label] = data", "def get_batches(self, batch_size):\n if self.data.shape[0] % batch_size != 0:\n raise RuntimeError('num of data tuples is not a multiple of batch size')\n num_batch = self.data.shape[0] // batch_size\n for b in range(num_batch):\n yield self.data[b*batch_size:(b+1)*batch_size, :], \\\n self.target[b*batch_size:(b+1)*batch_size, :]", "def process_state_batch(self, batch):\n # batch = np.squeeze(batch, axis=1)\n batch = np.array([np.concatenate(obs, axis=-1) for obs in batch])\n return batch", "def __getitem__(self, index):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n # Generate data\n X, Y = self.__data_generation(list_IDs_temp)\n\n return X, Y", "def batches(self, batch_size): \n if self.shuffle:\n idx = np.arange(len(dataset.train_x))\n np.random.shuffle(idx)\n self.train_x = self.train_x[idx]\n \n n_batches = len(self.train_x) // batch_size\n for ii in range(0, len(self.train_x), batch_size):\n x = self.train_x[ii:ii+batch_size]\n \n yield self.scaler(x)", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 
'label': self.label[i]\n })\n sum += il", "def _collect_all(self):", "def __data_generation(self, batch_indices):\n\n X = self.__get_npy_arrays(batch_indices)\n y = self.__get_records(batch_indices)\n\n return X, y", "def run(self):\r\n self.collect_data()", "def individual_collate(batch):\n\n data = batch\n\n collected_data = defaultdict(list)\n\n for i in range(len(list(data))):\n for k in data[i].keys():\n collected_data[k].append(data[i][k])\n\n for k in collected_data.keys():\n collected_data[k] = torch.stack(collected_data[k])\n\n return collected_data", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n\n if os.path.isdir(filenames):\n num_records = len(os.listdir(filenames))\n print(\"Loading from directory. \" + str(num_records) + \" tfRecords found.\")\n files = tf.data.Dataset.list_files(filenames + \"/\" + \"*.tfrecord\").shuffle(num_records)\n dataset = files.apply(\n tf.contrib.data.parallel_interleave(\n lambda x: tf.data.TFRecordDataset(x, num_parallel_reads=256, buffer_size=8*1024*1024),\n cycle_length=32, sloppy=True)\n )\n else:\n print(\"Loading from single tfRecord...\")\n dataset = tf.data.TFRecordDataset(filenames + \".tfrecord\").repeat()\n \n dataset = dataset.map(self.parser, num_parallel_calls=128)\n \n if self.subset == 'train':\n min_queue_examples = int(\n Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n \n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(10)\n \n iterator = dataset.make_one_shot_iterator()\n seq_batch, input_batch, map_batch, transformation_batch = iterator.get_next()\n\n return seq_batch, input_batch, map_batch, transformation_batch", "def __getitem__(self, index):\n\n # X : (n_samples, v_size, v_size, v_size, n_channels)\n # Initialization\n tmp_indeces = self.index_list[\n index*self.batch_size: (index+1)*self.batch_size]\n\n X = []\n IDs = []\n if self.has_labels:\n y = []\n else:\n y = None\n\n # Generate data\n for _i, load_index in enumerate(tmp_indeces):\n image = np.copy(self.data[load_index][DATA_IMAGE_INDEX])\n IDs.append(self.data[load_index][DATA_ID_INDEX])\n if self.DEBUG:\n print('load index: {}'.format(load_index))\n print('loaded image_shape: {}'.format(image.shape))\n\n # generator can also be used with test set\n if self.has_labels:\n label = np.copy(self.data[load_index][DATA_LABEL_INDEX:])\n else:\n label = None\n if self.DEBUG:\n print('label:')\n print(label.shape)\n\n # perform augmentation\n for func in self.augmentation_functions:\n if self.DEBUG:\n print('func: {}'.format(func))\n try:\n image, label = func(image, label)\n except ValueError as Error:\n print(Error)\n print('-'*15)\n raise ValueError(\n 'Error in function {} for input {} and {}'.format(\n func, image, label\n )\n )\n if self.DEBUG:\n print('image_shape after aug: {}'.format(image.shape))\n print('label_shape after aug: {}'.format(label.shape))\n print(np.max(label))\n\n if image.ndim == 2:\n image = image[..., np.newaxis]\n\n X.append(image)\n if label is not None:\n if label.ndim == 2:\n label = label[:, :, np.newaxis]\n y.append(label)\n\n X = self._to_stack(X)\n if y is not None:\n y = self._to_stack(y)\n\n if self.batch_size == 1:\n IDs = IDs[0]\n\n if self.DEBUG:\n print('X: {}'.format(X))\n\n if self.return_ID:\n return X, y, IDs\n else:\n return X, y", "def _val_pipeline(self, ds_images, 
ds_labels):\n \n ds_zip = tf.data.Dataset.zip((ds_images, ds_labels))\n if self.val_count != 0:\n ds = (ds_zip.repeat(count=self.n_epochs)\n .batch(self.val_count)\n .prefetch(3))\n\n return ds", "def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def __data_generation(self, list_IDs_temp):\n X1 = np.empty((len(list_IDs_temp), *self.dim, self.n_channels))\n X2 = np.empty((len(list_IDs_temp), *self.dim, self.n_channels))\n X3 = np.empty((len(list_IDs_temp), *self.dim, self.n_channels))\n X4 = np.empty((len(list_IDs_temp), *self.dim, self.n_channels))\n y = np.empty(len(list_IDs_temp), dtype=int)\n\n for i, ID_instance in enumerate(list_IDs_temp):\n\n with (self.path_instances / ID_instance).open('rb') as input:\n instance = pickle.load(input)\n\n frames = instance['frames']\n\n label = instance['crossing']\n\n #Normalización de los frames\n if self.normalized:\n frames = frames * 1 / 255\n\n X1[i, ] = frames[0]\n X2[i, ] = frames[1]\n X3[i, ] = frames[2]\n X4[i, ] = frames[3]\n y[i] = label\n\n return [X1, X2, X3, X4], to_categorical(y, num_classes=self.n_classes)", "def trainDataGenerator(num_epochs):\r\n samples, all_files = get_filenames()\r\n for num in range(num_epochs):\r\n for i in range(len(samples)):\r\n sample = samples[i]\r\n for file in all_files[i]:\r\n ohvs, Y = prepData(sample, file)\r\n if (ohvs == []):\r\n continue\r\n X = np.array([ohvs[:800]])\r\n yield X, Y\r\n # for i in range(0, len(ohvs), 400):\r\n # X = np.array([ohvs[i : i+400]])\r\n # print(\"\\tX shape =\", X.shape)\r\n # yield X, Y\r", "def ptb_producer(raw_data, batch_size, num_steps, word_to_id):\n x = []\n y = []\n n_batches = len(raw_data) // batch_size\n for sentence in raw_data:\n mask_index = get_mask_index(sentence)\n current_label = sentence[mask_index]\n sentence[mask_index] = word_to_id['<mask>']\n y.append(current_label)\n x.append(sentence)\n x = np.array(x)\n x = x[:n_batches*batch_size]\n x = np.reshape(x, [n_batches, batch_size, num_steps])\n y = np.array(y)\n y = y[:n_batches * batch_size]\n y = np.reshape(y, [n_batches, batch_size])\n return x, y", "def _read(self, documents):\n data = []\n X,Y = [], []\n for document in documents:\n d_ata = pd.read_csv(document, sep='\\t', names=['review','label'])\n data.append(d_ata)\n data = pd.concat(data)\n self.data = data\n Y = data.label\n self.vec.fit(data.review)\n X = self.preprocess(data)\n \n return train_test_split(X,Y)", "def run_batch_filter(self):\n if self.speed_data is None and self.flow_data is None and self.traveltime_data is None:\n print(\n 'Warning: The measurement data must be set before running the batch filter: use function self.set_meas_data()')\n\n # =======================================================================\n # the initial ensembles, which should have been set externally\n X_init = np.matrix(np.zeros((self.dim_state, self.num_ensembles)))\n print(\n 
'Setting initial ensembles: rho {0}; qin {1}; qout {2}'.format(self.init_rho, self.init_qin, self.init_qout))\n for ens in range(0, self.num_ensembles):\n X_init[self.x_index['density'][0]:\n self.x_index['density'][self.num_cells - 1], ens] = self.init_rho\n X_init[self.x_index['qin'], ens] = self.init_qin\n X_init[self.x_index['qout'], ens] = self.init_qout\n\n # print('setted qin {0}; qout {1}'.format(X_init[self.x_index['qin'], ens], X_init[self.x_index['qout'], ens] ))\n # add noise to each ensemble\n X_init[:, ens] += np.matrix(np.random.multivariate_normal(\n np.zeros(self.dim_state), self.Q)).reshape((self.dim_state, 1))\n\n self.set_initial_ensembles(X_init)\n\n # =======================================================================\n # DEBUG\n # save the qin and qout in the corresponding probe data\n # save the initial state\n if self.__debug:\n self.qin_f.append(np.squeeze(np.array(self.X_f[self.x_index['qin'], :])).tolist())\n self.qin_a.append(np.squeeze(np.array(self.X_a[self.x_index['qin'], :])).tolist())\n self.qin_obs.append(np.nan)\n\n self.qout_f.append(np.squeeze(np.array(self.X_f[self.x_index['qout'], :])).tolist())\n self.qout_a.append(np.squeeze(np.array(self.X_a[self.x_index['qout'], :])).tolist())\n self.qout_obs.append(np.nan)\n\n # The enKF runs at the finest time grid\n # for each step, update the system\n for step in range(0, self.num_steps):\n\n # update status\n sys.stdout.write('\\r')\n sys.stdout.write('Status: filtering step {0}/{1}'.format(step, self.num_steps))\n sys.stdout.flush()\n # print('Status: filtering step {0}'.format(step))\n\n cur_time = (step + 1) * self.dur_steps\n\n # get the effective measurement\n eff_flow, eff_speed, eff_traveltime = self.__get_eff_meas(cur_time)\n\n # build the observation index\n self.y_index, self.dim_obs, y_obs, cov_noise = self.__build_obs_index(eff_flow, eff_speed, eff_traveltime)\n\n # update the estimate for this step\n est_state = self.update_estimate(y_obs, cov_noise, cur_time)\n\n # =======================================================================\n # DEBUG\n # save the qin and qout in the corresponding probe data\n # save the updated state\n if self.__debug:\n self.qin_f.append(np.squeeze(np.array(self.X_f[self.x_index['qin'], :])).tolist())\n self.qin_a.append(np.squeeze(np.array(self.X_a[self.x_index['qin'], :])).tolist())\n if 'flow' in self.y_index.keys() and self.__debug_entrance_sensor in self.y_index['flow'].keys():\n self.qin_obs.append(y_obs[self.y_index['flow'][self.__debug_entrance_sensor]])\n # print('y_index[flow]:{0}'.format(self.y_index['flow'].keys()))\n # print('y_obs[ y_index[flow][entrance] ]:{0}'.format(\n # y_obs[ self.y_index['flow'][self.__debug_entrance_sensor]],\n # self.__debug_entrance_sensor))\n else:\n self.qin_obs.append(np.nan)\n\n self.qout_f.append(np.squeeze(np.array(self.X_f[self.x_index['qout'], :])).tolist())\n self.qout_a.append(np.squeeze(np.array(self.X_a[self.x_index['qout'], :])).tolist())\n if 'flow' in self.y_index.keys() and self.__debug_exit_sensor in self.y_index['flow'].keys():\n self.qout_obs.append(y_obs[self.y_index['flow'][self.__debug_exit_sensor]])\n else:\n self.qout_obs.append(np.nan)\n # =======================================================================\n # save the estimated state\n self.est_state_all[:, step] = est_state\n\n # decouple and save into self.est_density, self.est_speed, self.est_queue, self.est_traveltime\n self.est_density[:, step] = est_state[0:self.num_cells, 0]\n\n # the speed is computed using the fundamental diagram\n 
for cell_id in range(0, self.num_cells):\n # use the static FD at this step\n self.est_speed[cell_id, step] = self.__rho2v(self.vm_cells[cell_id, 0], self.beta_cells[cell_id, 0],\n self.rhoc_cells[cell_id, 0], self.wc_cells[cell_id, 0],\n self.est_density[cell_id, step])\n\n # REMARK: the queue and travel time a post-processed from the speed field.\n # They are computed in cross_evaluation class for all algorithms\n # the queue length starts from the first cell with speed below queue_threshold to the end of road\n # index = (self.est_speed[:, step] <= self.queue_threshold)\n #\n # # filter out the outliers\n # index_smoothed = deepcopy(index)\n # outlier_max = 3\n # counter = 0\n # for i in range(0, len(index)):\n #\n # if index[i] == True:\n # # trigger the coutner\n # counter += 1\n # elif index[i] == False and counter != 0:\n # if counter <= outlier_max:\n # # found outliers\n # index_smoothed[ i-counter : i ] = False\n # # reset counter\n # counter = 0\n #\n # # if i != 0 and i != len(index)-1:\n # # if sum( index[i-1:i+3] ) >=2:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n # # elif i == 0:\n # # if sum(index[0: 5] ) >= 3:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n # # elif i == len(index)-1:\n # # if sum(index[ i-4 :len(index)]) >= 3:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n #\n # if sum(index_smoothed) <= 3: # use 4 to suppress false alarms\n # # if less or equal then 2 cells are in congestion, it may be caused by noise.\n # self.est_queue[step] = 0\n # else:\n # # if step > 105 and step < 115:\n # # print(sum(index_smoothed))\n # # print(index_smoothed)\n # # print(index)\n #\n # self.est_queue[step] = \\\n # self.len_cells*( self.num_cells - np.argmax(index_smoothed) )\n # # try:\n # # first_cong_cell_id = [x[0] for x in enumerate( self.est_speed[:,step] ) if x[1] < self.queue_threshold][0]\n # # except IndexError:\n # # # no congested cell\n # # first_cong_cell_id = self.num_cells\n # # # the estimated queue length\n # # self.est_queue[step] = self.len_cells*( self.num_cells - first_cong_cell_id )\n #\n # # the travel time estimate is computed by summing up the travel time in each cell\n # self.est_traveltime[step] = np.sum(self.len_cells/self.est_speed[:,step])\n\n\n # =======================================================================\n # DEBUG\n # plot the update\n if self.__debug:\n plot_len = 19\n # qin\n if False:\n if not np.isnan(self.qin_obs[-1]):\n fig1 = plt.figure(figsize=(10, 5), dpi=100)\n ax1 = fig1.add_subplot(111)\n positions_f = np.arange(0, len(self.qin_f)) - 0.1\n positions_a = np.arange(0, len(self.qin_a)) + 0.1\n positions_obs = np.arange(0, len(self.qin_obs))\n # predicted as red\n bp = ax1.boxplot(self.qin_f[-plot_len:],\n positions=positions_f[-plot_len:], widths=0.15,\n patch_artist=False)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#FF4633', linewidth=1)\n # change fill color\n # box.set( facecolor = '#FF4633' )\n # corrected as green\n bp = ax1.boxplot(self.qin_a[-plot_len:],\n positions=positions_a[-plot_len:], widths=0.15, patch_artist=False)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#07891B', linewidth=1)\n # change fill color\n # box.set( facecolor = '#07891B' )\n # measurement as blue\n ax1.scatter(positions_obs[-plot_len:], self.qin_obs[-plot_len:], color='b', marker='o', s=40,\n label='Observation')\n ax1.set_title('qin')\n # x_ticks = np.arange(0, len(self.qin_f))\n # 
ax1.set_xticks(x_ticks[-plot_len:])\n plt.show()\n\n # qout\n if False:\n if not np.isnan(self.qout_obs[-1]):\n fig2 = plt.figure(figsize=(10, 5), dpi=100)\n ax2 = fig2.add_subplot(111)\n positions_f = np.arange(0, len(self.qout_f)) - 0.1\n positions_a = np.arange(0, len(self.qout_a)) + 0.1\n positions_obs = np.arange(0, len(self.qout_obs))\n # predicted as red\n bp = ax2.boxplot(self.qout_f[-plot_len:], positions=positions_f[-plot_len:], widths=0.18,\n patch_artist=True)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#7570b3', linewidth=1)\n # change fill color\n box.set(facecolor='#FF4633')\n # corrected as green\n bp = ax2.boxplot(self.qout_a[-plot_len:], positions=positions_a[-plot_len:], widths=0.18,\n patch_artist=True)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#7570b3', linewidth=1)\n # change fill color\n box.set(facecolor='#07891B')\n # measurement as blue\n ax2.scatter(positions_obs[-plot_len:], self.qout_obs[-plot_len:], color='b', marker='o', s=30,\n label='Observation')\n ax2.set_title('qout')\n # x_ticks = np.arange(0, len(self.qout_f))\n # ax2.set_xticks(x_ticks[-plot_len:])\n\n plt.show()\n\n # plot the estimated qin and qout\n if self.__debug:\n if True:\n qin = np.squeeze(np.array(self.est_state_all[self.x_index['qin'], :]))\n qin_meas = np.array(self.qin_obs)[1:]\n print(len(qin), len(qin_meas))\n fig1 = plt.figure(figsize=(10, 5), dpi=100)\n ax1 = fig1.add_subplot(111)\n t = np.arange(len(qin))\n ax1.plot(t, qin, 'r-', label='Estimated')\n not_nan = ~np.isnan(qin_meas)\n ax1.plot(t[not_nan], qin_meas[not_nan], 'b', label='Measured')\n ax1.legend()\n ax1.grid(True)\n ax1.set_title('qin')\n\n plt.draw()\n\n if True:\n qout = np.squeeze(np.array(self.est_state_all[self.x_index['qout'], :]))\n qout_meas = np.array(self.qout_obs)[1:]\n fig2 = plt.figure(figsize=(10, 5), dpi=100)\n ax2 = fig2.add_subplot(111)\n t = np.arange(len(qout))\n ax2.plot(t, qout, 'r-', label='Estimated')\n not_nan = ~np.isnan(qout_meas)\n ax2.plot(t[not_nan], qout_meas[not_nan], 'b', label='Measured')\n ax2.set_title('qout')\n ax2.legend()\n ax2.grid(True)\n plt.draw()", "def transform(self, chunks):\n data = np.array([chunk.flatten() for chunk in chunks])\n\n return data", "def __getitem__(self, index):\n # get the indexs of each batch\n batch_indexs = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n # using batch_indexs to get path of current batch\n batch_path = [self.X_path[k] for k in batch_indexs]\n # get batch data\n batch_x, batch_y = self.data_generation(batch_path)\n return batch_x, batch_y", "def batch_data(source, target, batch_size):\n for batch_i in range(0, len(source)//batch_size):\n start_i = batch_i * batch_size\n source_batch = source[start_i:start_i + batch_size]\n target_batch = target[start_i:start_i + batch_size]\n yield np.array(pad_sentence_batch(source_batch)), np.array(pad_sentence_batch(target_batch))", "def process_batch_static(batch, subset_size, tokeniser, model, idx2word, word2uniquevec):\r\n ## initialisation\r\n new_token_ids = []\r\n word2manyvec = {}\r\n streaming_batch_size = len(batch)\r\n\r\n start = time.time()\r\n\r\n ## processing the batch one subset at a time\r\n for s in range(0, streaming_batch_size, subset_size):\r\n print(\"Processing subset \" + str(s // subset_size + 1))\r\n if s + subset_size < len(batch):\r\n batch_subset = batch[s:s + subset_size]\r\n else:\r\n batch_subset = batch[s:]\r\n # iteratively update idx2word, new_token_ids, word2manyvec throughout the batch\r\n idx2word, 
new_token_ids, word2manyvec = process_subset_static(batch_subset, tokenizer, model,\r\n idx2word, new_token_ids,\r\n word2uniquevec, word2manyvec, selected_layer=1)\r\n print(\"Number of word vectors so far: \" + str(len(idx2word)))\r\n\r\n # for every new word discover, find unique vector representation by taking first PCA prinicipal component\r\n for word, veclist in word2manyvec.items():\r\n if len(veclist) == 1: # otherwise we would just get the first standard unit vector from PCA\r\n word2uniquevec[word] = torch.Tensor(veclist[0])\r\n else:\r\n pca = PCA(n_components=1)\r\n pca.fit(veclist)\r\n word2uniquevec[word] = torch.Tensor(pca.components_[0])\r\n\r\n end = time.time()\r\n print(\"Total time: \" + str(round(end - start, 2)) + \" s.\")\r\n\r\n return word2uniquevec, idx2word, new_token_ids", "def process_batch(self, data):\n [embedding_batch] = self._sess.run([self._embedding_tensor],\n feed_dict={self._features_tensor: data})\n return embedding_batch", "def on_batch(self, x, y):", "def _process_data(rdd_entry, feature_list):\n events = []\n for event in rdd_entry:\n events.append(event[RDD_EVENT])\n return IptablesIngestor.vectorize_events(events, feature_list)", "def __getitem__(self, index):\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n return X, y", "def __getitem__(self, index):\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n return X, y", "def process(self, data_batch: Any, predictions: Sequence[dict]) -> None:\n self.results.extend(_to_cpu(predictions))", "def _train_batch(self):\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n result = self._batch_iter(source, target, i)\n\n # yield\n yield result", "def get_batch(X, Y, iteration):\n offset = 100\n start = iteration * offset % len(Y)\n \n # YOUR CODE HERE\n # This will return the entire data set each iteration. This is costly, so\n # you should experiment with different way of changing this:\n return X[start: start + offset], Y[start: start + offset]", "def collect_datset(self):\n response = requests.get(self.url)\n lines = response.text.splitlines()\n data = []\n for item in lines:\n item = item.split(\",\")\n data.append(item)\n data.pop(0) # to remove labels from list\n dataset = np.matrix(data)\n return dataset", "def _get_item(self, index):\n data, label = self.data[index], self.label[index]\n coordmax = np.max(data, axis=0)\n coordmin = np.min(data, axis=0)\n nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/1.5).astype(np.int32)\n nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/1.5).astype(np.int32)\n batch_data, batch_label = [], []\n for i in range(nsubvolume_x):\n for j in range(nsubvolume_y):\n curmin = coordmin + [i*1.5, j*1.5, 0]\n curmax = coordmin+ [(i+1)*1.5, (j+1)*1.5, coordmax[2]-coordmin[2]]\n crop_ids = np.sum((data>=(curmin-0.2)) * (data<=(curmax+0.2)), axis=1) == 3\n if sum(crop_ids) == 0: continue\n crop_data = data[crop_ids]\n crop_label = label[crop_ids]\n mask = np.sum((crop_data>=(curmin-0.001)) * (crop_data<=(curmax+0.001)), axis=1) == 3\n ids = np.random.choice(crop_label.size, self.npoints, replace=True)\n this_data = crop_data[ids]\n this_label = crop_label[ids]\n this_mask = mask[ids]\n if sum(this_mask) * 1. 
/ this_mask.size < 0.01: continue\n this_label *= this_mask\n if self.normalize:\n this_data = utils.normalize_point_cloud(this_data)\n batch_data.append(this_data[None,:,:])\n batch_label.append(this_label[None,:])\n batch_data = np.concatenate(tuple(batch_data), axis=0)\n batch_label = np.concatenate(tuple(batch_label), axis=0)\n return batch_data, batch_label", "def run(self):\r\n for pipe in self.inputs:\r\n for row in pipe.rows():\r\n self.put(row)", "def next(self):\n\n if self.i_sample < self.n_sample:\n df_batch = self.grouped[self.i_sample:min(self.n_sample, self.i_sample + self.batch_size)]\n # at end of epoch, number of sample remains may be smaller than batch size\n if len(df_batch) < self.batch_size:\n df_sample = random.sample(self.grouped, self.batch_size-len(df_batch))\n df_batch = df_batch + df_sample\n try:\n assert len(df_batch) == self.batch_size\n except AssertionError:\n print(self.i_sample, df_sample, df_batch)\n\n # get random frame_idxs\n if self.train:\n flips = np.random.choice(a=[False, True], size=(self.batch_size,), p=[0.5, 0.5])\n else:\n flips = np.zeros(self.batch_size, dtype=bool)\n\n\n video = sample_clips(df_batch, flips, self.batch_size, self.n_frame,\n self.scale_w, self.scale_h, self.sample_half_time, self.train)\n\n bboxes = np.zeros((self.batch_size, self.n_frame // self.temporal_scale, self.n_bbox, 5))\n labels = np.zeros((self.batch_size, self.n_bbox, self.num_class))\n for i in range(len(df_batch)):\n tmp_bbox, tmp_label = self.get_bbox_and_label(df_batch[i], flips[i], i, self.scale_w, self.scale_h)\n bboxes[i] = tmp_bbox\n labels[i] = tmp_label\n\n if self.debug_dataloader:\n with open('dataset/AVA_v2.1/ava_action_list_v2.1.pbtxt') as fd:\n lines = fd.readlines()\n\n labels_info = []\n for i in range(80):\n name_line = lines[i * 5 + 1]\n label_id_line = lines[i * 5 + 2]\n label_type_line = lines[i * 5 + 3]\n\n name = name_line[name_line.find('\"') + 1:name_line.rfind('\"')]\n label_id = int(label_id_line.strip().split(':')[1].strip())\n label_type = label_type_line.strip().split(':')[1].strip()\n\n assert label_id == i + 1\n labels_info.append({\n 'name': name,\n 'label_type': label_type\n })\n\n for bidx in range(self.batch_size):\n s_video = video[bidx, ...]\n s_bboxes = bboxes[bidx, ...]\n s_labels = labels[bidx, ...]\n\n window_name = 'batch_idx_'+str(bidx)\n if self.train:\n window_name += '_train'\n else:\n window_name += '_val'\n\n\n bbox = s_bboxes[0, 0, 1:].astype(np.int32)\n label_indices = np.where(s_labels[0, :])[0]\n\n for fidx in range(self.n_frame):\n # print('fidx', fidx)\n save_name = window_name + '_' + str(fidx)\n tmp_img = (s_video[:, fidx, :, :].transpose((1,2,0))).astype(np.uint8).copy()\n\n cv2.rectangle(tmp_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=(0,0,255), thickness=2)\n for en_idx, label_index in enumerate(label_indices):\n # print('label_index', label_index, 'len', len(labels_info))\n cv2.putText(tmp_img, labels_info[label_index]['name'], (bbox[0], bbox[1] + en_idx * 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, color=(0, 255, 0), thickness=1)\n\n cv2.imwrite(save_name+'.jpg', tmp_img)\n\n\n #print(video.shape, bboxes.shape, labels.shape)\n ret = mx.io.DataBatch(data=[mx.nd.array(video), mx.nd.array(bboxes)],\n label=[mx.nd.array(labels),],\n provide_data=self.provide_data,\n provide_label=self.provide_label)\n\n self.i_sample += self.batch_size\n return ret\n else:\n raise StopIteration", "def predict_collect(self, src, collector): # real signature unknown; restored from __doc__\n pass", "def execute_collect(prog, 
_input):\n gen = execute(prog, _input)\n out = []\n for output in gen:\n out.append(output)\n return out", "def _data(self) -> List[Data]:\n data = []\n sample_ids, repeated_sample_ids = set(), set()\n for datum in self.collection.data.filter(\n type=self.process_type,\n status=\"OK\",\n ordering=\"-created\",\n fields=self.DATA_FIELDS,\n ):\n # 1 Filter by newest datum in the sample\n if datum.sample.id in sample_ids:\n repeated_sample_ids.add(datum.sample.id)\n continue\n\n # 2 Filter by genes, if geneset is given\n if self.geneset:\n obj_geneset = self._get_obj_geneset(datum)\n if not self.geneset.issubset(obj_geneset):\n warnings.warn(\n f\"Sample {datum.sample} (Data {datum.id}) does not \"\n \"contain the genes requested in geneset input.\"\n )\n continue\n\n sample_ids.add(datum.sample.id)\n data.append(datum)\n\n if repeated_sample_ids:\n repeated = \", \".join(map(str, repeated_sample_ids))\n warnings.warn(\n f\"The following samples have multiple data of type {self.process_type}: \"\n f\"{repeated}. Using only the newest data of this sample.\",\n UserWarning,\n )\n\n if not data:\n raise ValueError(\n f\"Collection {self.collection.name} has no {self.process_type} \"\n \"data or there is no data with the requested mutations.\"\n )\n\n return data", "def make_vector_batches(data, nbatches, batch_size=None):\n print \"---->\\n.....Putting into vector-shaped batches\"\n if batch_size==None:\n batch_size = int(data['images'].shape[0]/nbatches)\n else:\n assert nbatches * batch_size <= data['images'].shape[0]\n permut = permutation(data['images'].shape[0])\n xdata = []\n ydata = []\n for i in range(nbatches):\n xs = data['images'][permut[i * batch_size:(i + 1) * batch_size],\n :, :, :]\n xdata.append(reshape(xs, (batch_size, prod(xs.shape) / batch_size)))\n ydata.append(data['labels'][permut[i * batch_size:(i + 1)\n * batch_size]])\n print \"---->\\n.....Done!\"\n return [np.reshape(np.asarray(xdata), (nbatches, batch_size, -1)),\n np.asarray(ydata)]", "def make_X(self, dataset):\n\t\tprint(\"Making X\", dataset)\n\t\tself.X[dataset] = []\n\t\tfor document in self.tokenized_documents[dataset]:\n\t\t\tarray = np.array([self.worddict.get(word, self.worddict[\"__oov__\"]) for word in document])\n\t\t\tself.X[dataset].append(array)" ]
[ "0.61692625", "0.6092687", "0.60661197", "0.60572034", "0.6030449", "0.59338397", "0.59050703", "0.5902262", "0.57682383", "0.57110006", "0.5708979", "0.5663044", "0.565785", "0.56574595", "0.5642957", "0.5642957", "0.56295526", "0.56236845", "0.5613696", "0.55893534", "0.55703306", "0.5562166", "0.5559824", "0.55445695", "0.5521423", "0.55092984", "0.5503262", "0.5503047", "0.54959637", "0.5492849", "0.5480282", "0.5470206", "0.54630476", "0.5461806", "0.54547125", "0.5450007", "0.5437941", "0.5435673", "0.54264814", "0.54243916", "0.5421097", "0.5402924", "0.5390238", "0.5389472", "0.53841025", "0.5383909", "0.5379029", "0.53783053", "0.53776735", "0.5356075", "0.5353979", "0.53521305", "0.5350179", "0.53491825", "0.53488094", "0.53470874", "0.53430754", "0.53419155", "0.5328149", "0.5328149", "0.5325274", "0.53186864", "0.53161365", "0.5306248", "0.5305788", "0.5305725", "0.529648", "0.52964586", "0.529601", "0.52904546", "0.5289833", "0.52893025", "0.5286022", "0.5285557", "0.52837056", "0.5282482", "0.52798766", "0.5275322", "0.52741426", "0.52723324", "0.52663684", "0.5266054", "0.5263918", "0.52505964", "0.5247908", "0.5244915", "0.524122", "0.5235599", "0.5235599", "0.5229799", "0.52275616", "0.52204376", "0.52170086", "0.52085793", "0.5204372", "0.52029645", "0.52002835", "0.51889265", "0.5187", "0.5184702", "0.5182251" ]
0.0
-1
Collect from a batch of VoxWave Dataset.
def collate_fn(batch):
    file = [item["file"] for item in batch]
    wave = torch.cat([item["wave"] for item in batch], dim=0)
    return {"file": file, "wave": wave}
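For orientation, a minimal, self-contained sketch of how a collate function of this shape is typically plugged into torch.utils.data.DataLoader. The DummyWaveSet class, its item shapes, and the batch size are illustrative assumptions for this sketch only; they are not part of the stored record.

import torch
from torch.utils.data import DataLoader, Dataset

class DummyWaveSet(Dataset):
    """Stand-in dataset: each item is a dict with a file name and a (num_eval, L) waveform (assumed shapes)."""
    def __init__(self, n=4, num_eval=6, length=16000):
        self.items = [{"file": f"utt_{i}.wav", "wave": torch.randn(num_eval, length)} for i in range(n)]

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        return self.items[idx]

def collate_fn(batch):
    # Keep file names as a flat list; stack each item's waveform chunks along dim 0.
    file = [item["file"] for item in batch]
    wave = torch.cat([item["wave"] for item in batch], dim=0)
    return {"file": file, "wave": wave}

loader = DataLoader(DummyWaveSet(), batch_size=2, collate_fn=collate_fn, shuffle=False)
batch = next(iter(loader))
print(batch["file"], batch["wave"].shape)  # ['utt_0.wav', 'utt_1.wav'] torch.Size([12, 16000])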
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data(self, train=True, batch_size=2):\n if train:\n elements = self.prepare_batch(self.training_albums)\n else:\n elements = self.prepare_batch(self.validation_albums)\n\n while len(elements) > 0:\n # Collect the batch\n batch = []\n for _ in range(min(batch_size, len(elements))):\n batch.append(elements.pop())\n\n # Get same sequence size for all elements of the batch\n albums, labels = self.batchify(batch)\n yield albums, labels", "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def extract(self):\n\n # print some infos about data\n print(\"\\n--extract batches from data:\\ntrain: {}\\nval: {}\\ntest: {}\\n\".format(self.data[0]['x'].shape, self.data[1]['x'].shape, self.data[2]['x'].shape))\n\n # create batches\n self.x_train, self.y_train, _ = self.create_batches(self.data[0], batch_size=self.batch_size)\n self.x_val, self.y_val, _ = self.create_batches(self.data[1], batch_size=self.batch_size_eval)\n self.x_test, self.y_test, _ = self.create_batches(self.data[2], batch_size=self.batch_size_eval)\n\n # my data\n if len(self.mfcc_data_files) == 4:\n self.x_my, self.y_my, self.z_my = self.create_batches(self.data[3], batch_size=1)", "def split_and_load(batch_data, num_gpus):\n return [batch_data[i].data[0] for i in range(num_gpus)], \\\n [batch_data[i].label[0].as_in_context(mx.gpu(i)) for i in range(num_gpus)]", "def collect_fn(batch):\r\n # max_detection = max(list(map(lambda x: len(x[5]), batch)))\r\n max_detection = max(list(map(lambda x: len(x), batch)))\r\n for i in range(len(batch)):\r\n batch[i] = list(batch[i]) # because the element in the batch is a tuple\r\n dummy = torch.zeros((1,5), dtype=batch[i][5].dtype)\r\n temp = batch[i][5]\r\n # make the detection to the same length in order to stack the\r\n while temp.size(0) < max_detection:\r\n temp = torch.cat((temp, dummy))\r\n batch[i][5] = temp\r\n \r\n return default_collate(batch)", "def _get_batch_data(self, batch):\n try:\n encoders = [ encoder for encoder in self._data_encoder ]\n except:\n encoders = (self._data_encoder,)\n\n try:\n data_batches = [ encoder.transform_batch(rec for _, rec in batch.iterrows())\n for encoder in encoders ]\n except AttributeError:\n data_batches = [\n [ self._get_data(record, encoder) for _, record in batch.iterrows() ]\n for encoder in encoders ]\n\n try:\n batches = [ np.array(encoder.finalize_batch(batch))\n for encoder, batch in zip(encoders, data_batches)]\n except AttributeError:\n batches = [ np.array(batch) for batch in data_batches ]\n\n return batches if len(batches) > 1 else batches[0]", "def data_gen(\n v: int, batch: int, nbatches: int, device: torch.device = torch.device(\"cpu\")\n) -> Iterator[Batch]: # TODO bad name\n for i in range(nbatches):\n data = np.random.randint(1, v, size=(batch, 10))\n data[:, 0] = 1\n src: LongTensorType = torch.from_numpy(data)\n tgt: LongTensorType = torch.from_numpy(data)\n src, tgt = src.to(device), tgt.to(device)\n yield Batch(src, tgt, 0)", "def _get_batch_data(batch, ctx):\n data, label = batch\n return (mx.gluon.utils.split_and_load(data, ctx),\n mx.gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def collect(self):\r\n for idx_ds, ds in enumerate(self.datasets):\r\n # print \"collect() using ds = \", repr(ds)\r\n reader_rows = ds.dict_reader()\r\n for row in reader_rows:\r\n # print \"Collect: idx_ds=%d\" % idx_ds\r\n for summary in self.summaries:\r\n #Derive coords from the row for this summary\r\n coords=[]\r\n for sum_col in summary.columns:\r\n level = str(row[sum_col.name])\r\n if level is 
not None and level != \"None\": \r\n if level.find('.') != -1:\r\n # May be a float value with .0 ending to trim\r\n try:\r\n # If value is parsable as a float, and it \r\n # is an integer, represent it as an integer.\r\n flevel = float(level)\r\n # Strip a final .0 from the string.\r\n level = (\r\n str(int(flevel)) if flevel == int(flevel)\r\n else str(level))\r\n except:\r\n # Not a float, OK.\r\n pass\r\n else:\r\n level = \"\"\r\n coords.append(level)\r\n #print \"coords:\", repr(coords)\r\n #Register row data into this summary.\r\n cell = summary.cell(coords)\r\n #Future, along with ds_index, could also pass along \r\n # row's ordinal column values.\r\n # Note to self: rename accrue_row to accrue_row() \r\n # when get into eclipse env\r\n cell.entry.accrue_row(idx_ds)", "def process(self, data_batch: Sequence[Dict],\n data_samples: Sequence[Dict]) -> None:\n for data_sample in data_samples:\n pred_labels = data_sample.get('pred_instances').get(self.key).cpu()\n gt_labels = data_sample.get('gt_instances').get(self.key).cpu()\n\n result = dict(\n pred_labels=pred_labels.flatten(),\n gt_labels=gt_labels.flatten())\n self.results.append(result)", "def create_features_from_vids():\n\n dtype = get_dtype()\n feature_extractor = FeatureExtractor()\n feature_extractor.eval()\n feature_extractor.type(dtype)\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n dataset = StrokesDataset('../dataset/my_dataset/patches/labels.csv', '../dataset/my_dataset/patches/',\n transform=transforms.Compose([ToTensor(), normalize]), use_features=False)\n batch_size = 32\n count = 0\n for vid in dataset:\n count += 1\n frames = vid['frames']\n print(len(frames))\n\n features = []\n for batch in frames.split(batch_size):\n batch = batch.type(dtype)\n with torch.no_grad():\n # forward pass\n batch_features = feature_extractor(batch)\n features.append(batch_features.cpu().numpy())\n\n df = pd.DataFrame(np.concatenate(features, axis=0))\n\n outfile_path = os.path.join('../dataset/my_dataset/patches/', os.path.splitext(vid['vid_name'])[0] + '.csv')\n df.to_csv(outfile_path, index=False)\n\n print(count)", "def split_and_load(batch, ctx_list):\n new_batch = []\n for i, data in enumerate(batch):\n if isinstance(data, (list, tuple)):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n else:\n new_data = [data.as_in_context(ctx_list[0])]\n new_batch.append(new_data)\n return new_batch", "def _batchify(self, data_containers: Dict, batch_size):\n\n X = Variable(torch.LongTensor(data_containers['X'])).to(self.device)\n Y = Variable(torch.FloatTensor(data_containers['Y'])).to(self.device)\n\n data_size = X.size()[0]\n num_batches = data_size // batch_size\n\n return [\n (X[bi * batch_size: (bi + 1) * batch_size],\n Y[bi * batch_size: (bi + 1) * batch_size].unsqueeze(1))\n for bi in range(num_batches + 1)\n ]", "def __iter__(self):\n batch = []\n for sample in self.dataset:\n batch.append(sample)\n if len(batch) == self.size:\n yield self.transform(batch)\n batch = []\n if batch:\n # the last batch may be less then batch size.\n yield self.transform(batch)", "def split_and_load(batch, ctx_list):\n num_ctx = len(ctx_list)\n new_batch = []\n for i, data in enumerate(batch):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n new_batch.append(new_data)\n return new_batch", "def split_and_load(batch, ctx_list):\n num_ctx = len(ctx_list)\n new_batch = []\n for i, data in enumerate(batch):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, 
ctx_list)]\n new_batch.append(new_data)\n return new_batch", "def _data_generation(self, batch_data):\n # Initialization\n batch_x = []\n batch_y = defaultdict(list)\n\n for ind, item_data in batch_data.iterrows():\n img_path = os.path.join(self.img_dir, \"images\", \"rgb\", item_data[\"name\"])\n img = cv2.imread(img_path)\n try:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n except Exception as error:\n print(img_path)\n print(error)\n not_valid_mask = self.read_masks_borders(item_data[\"name\"])\n img[not_valid_mask] = 0\n\n # getmasks\n targets = np.zeros((img.shape[0], img.shape[1], len(self.classes)))\n for i, c in enumerate(self.classes):\n mask_path = os.path.join(self.img_dir, \"labels\", c, item_data[\"name\"])\n mask = cv2.imread(\n mask_path.replace(\".jpg\", \".png\"), cv2.IMREAD_GRAYSCALE\n )\n mask[not_valid_mask[:, :, 0]] = 0\n mask = mask > 0\n targets[:, :, i] = mask\n\n res = self.reshape_func(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n if self.do_aug:\n res = self.aug(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n\n for i, c in enumerate(self.classes):\n batch_y[c].append(targets[:, :, i])\n\n batch_x.append(img)\n\n batch_x = np.array(batch_x, np.float32)\n batch_y = {k: np.array(v, np.float32) for k, v in batch_y.items()}\n batch_y = {k: np.expand_dims(v, axis=-1) for k, v in batch_y.items()}\n\n return (\n imagenet_utils.preprocess_input(batch_x, \"channels_last\", mode=\"tf\"),\n batch_y\n )", "def batch_data(cls, train_data, train_labels, batch_size):\n for batch in range(int(np.ceil(train_data.shape[0] / batch_size))):\n start = batch_size * batch\n end = start + batch_size\n if end > train_data.shape[0]:\n yield batch, (train_data[start:train_data.shape[0]], \\\n train_labels[start:train_data.shape[0]])\n else:\n yield batch, (train_data[start:end], \\\n train_labels[start:end])", "def __getitem__(self, index):\r\n\r\n # Generate indexes of the batch\r\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\r\n\r\n # Find list of IDs\r\n list_ids_temp = [self.list_IDs[k] for k in indexes]\r\n\r\n # Calls function to load batch of data into memory\r\n X, y = self.__data_generation(list_ids_temp)\r\n\r\n return X, y", "def _get_batch(self):\n # index = self._index[self._current]\n # im_path = self._imdb.image_path_from_index(0)\n # im_path = 'data/demo/dog.jpg'\n # with open(im_path, 'rb') as fp:\n # img_content = fp.read()\n\n batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1]))\n batch_label = [] \n global imgi\n # img = mx.nd.array(imgi)\n # imgr = mx.img.imdecode(img_content)\n data = self._data_augmentation(imgi)\n batch_data[0] = data\n \n self._data = {'data': batch_data}\n self._label = {'label': None}", "def prepareDataBatches(self, traindata, trainlabel):\n index = np.random.permutation(len(traindata))\n traindata = traindata[index]\n trainlabel = trainlabel[index]\n split_no = int(len(traindata) / self.batchSize)\n return zip(np.split(traindata[:split_no*self.batchSize], split_no), np.split(trainlabel[:split_no*self.batchSize], split_no))", "def _proc_dataset(d):\n # merge 2dseq complex frame group if present\n if d.is_complex and d.type == '2dseq':\n d = FrameGroupMerger().merge(d, 'FG_COMPLEX')\n\n # prepare the data array\n if d.is_svs:\n data = _prep_data_svs(d)\n elif d.is_mrsi:\n data = _prep_data_mrsi(d)\n else:\n data = d.data\n\n # get properties\n properties = d.to_dict()\n\n # some Bruker datasets do not have affine property\n if d.type 
== 'fid': if not 'affine' in properties: properties.update({'affine':np.identity(4)})\n \n yield data, properties", "def _batch(self, X, y, batch_size=16):\n X, y = self._shuffle(X, y) # shuffle the data\n self.batches = []\n idx = 0\n while idx < len(X):\n batch = (X[idx:idx+batch_size], y[idx:idx+batch_size])\n self.batches.append(batch)\n idx += batch_size", "def __data_generation(self, rows):\n samples = np.zeros((rows, self.image_width, self.image_height, self.image_depth))\n targets = np.zeros((rows, self.image_width, self.image_height, self.num_classes))\n for j in range(rows):\n for row1, row2 in zip(self.reader1, self.reader2):\n array_row1 = np.array(row1, dtype=np.float)\n samples[j,:,:,:] = preprocess_feature(array_row1,\n self.image_width, self.image_height, self.image_depth)\n try:\n next(self.reader1)\n except StopIteration:\n print(\"CSV iteration end for feature. Calling 'break'.\")\n break\n\n array_row2 = np.array(row2, dtype=np.int)\n targets[j,:,:,:] = preprocess_label(array_row2,\n self.image_width, self.image_height, self.num_classes)\n try:\n next(self.reader2)\n except StopIteration:\n print(\"CSV iteration end for label. Calling 'break'.\")\n break\n\n return samples, targets", "def train_datas(self, batch_size):\r\n if not isinstance(batch_size, int):\r\n raise ValueError('In Dataset, batch_size should be int, get '\r\n '{}'.format(type(batch_size)))\r\n if batch_size <= 0:\r\n raise ValueError('In Dataset, batch_size should larger equal to '\r\n '1, get {}'.format(batch_size))\r\n \r\n indices = list(range(self.size))\r\n np.random.shuffle(indices)\r\n\r\n epoch_size = self.size // batch_size * batch_size\r\n self._train_datas = self._train_datas[indices][:epoch_size] # [epoch_size, ...]\r\n self._train_labels = self._train_labels[indices][:epoch_size] # [epoch_size, ...]\r\n \r\n datas = []\r\n for i in range(self.size // batch_size):\r\n # for label, we have box and landmark which is 0.\r\n datas.append([self._train_datas[i*batch_size:(i+1)*batch_size], \r\n self._train_labels[i*batch_size:(i+1)*batch_size]])\r\n return datas", "def train_data() -> Iterator[Tuple[Label, ChanneledImage]]:\n return zip(*get_data(TRAIN_FILES, 60000))", "def _extract_batch(self, data, batch_size):\n\n batch_size = batch_size or BATCH_SIZE\n\n batch = []\n try:\n for i in range(batch_size):\n batch.append(data.next())\n except StopIteration:\n pass\n\n return batch", "def process(dataset, f):\n logger.info('processing dataset ({0})'.format(len(dataset.samples)))\n for sample in dataset.samples:\n sample.proc = f(sample.image)", "def fetch_data(data, count, idx_batch, vocab_size):\r\n batch_size = len(idx_batch)\r\n data_batch = np.zeros((batch_size, vocab_size))\r\n count_batch = []\r\n mask = np.zeros(batch_size)\r\n indices = []\r\n values = []\r\n for i, doc_id in enumerate(idx_batch):\r\n if doc_id != -1:\r\n for word_id, freq in data[doc_id].items():\r\n data_batch[i, word_id] = freq\r\n count_batch.append(count[doc_id])\r\n mask[i]=1.0\r\n else:\r\n count_batch.append(0)\r\n return data_batch, count_batch, mask", "def collect_atlas(cls):\n yield from cls.collect()", "def next_simple_dataset(dataset, batch_size: int, datatype):\n while True:\n x_batch = []\n y_batch = []\n for i in range(batch_size):\n try:\n x, y, data_unit, index = create_xy(dataset, datatype)\n # x = normalize(x)\n x_batch.append(x)\n y_batch.append(y)\n except StopIteration:\n break\n x_batch, y_batch = np.array(x_batch), np.array(y_batch)\n if datatype != DataType.test:\n x_batch = 
SEQ_CVXTZ.augment_images(x_batch).astype(\"float32\")\n x_batch = np.array([normalize(x) for x in x_batch])\n # org_shape = x_batch.shape\n # org_width = x_batch.shape[1]\n # corner = int((org_width - ROI_IMAGE_SIZE) // 2)\n # print(f\"0: org_shape:{org_shape} x_batch:{x_batch.shape} corner:{corner}\")\n # x_batch = x_batch[:, corner:(org_width - corner), corner:(org_width - corner), :]\n # resized_x_batch = []\n # for x in x_batch:\n # img = Image.fromarray(np.uint8(x))\n # img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.LANCZOS)\n # resized_x_batch.append(normalize(np.array(img)))\n # print(f\"1: org_shape:{org_shape} corner:{corner} x_batch:{x_batch.shape}\")\n # yield np.array(resized_x_batch), y_batch\n yield np.array(x_batch), y_batch", "def fetch_data(data, count, idx_batch, vocab_size):\r\n batch_size = len(idx_batch)\r\n data_batch = np.zeros((batch_size, vocab_size))\r\n count_batch = []\r\n mask = np.zeros(batch_size)\r\n for i, doc_id in enumerate(idx_batch):\r\n if doc_id != -1:\r\n for word_id, freq in data[doc_id].items():\r\n data_batch[i, word_id] = freq\r\n count_batch.append(count[doc_id])\r\n mask[i]=1.0\r\n else:\r\n count_batch.append(0)\r\n return data_batch, count_batch, mask", "def my_detection_collate(batch):\n targets_1 = []\n imgs = []\n for sample in batch:\n # each sample is the result of one query on the dataset object\n imgs.append(sample[0])\n targets_1.append(torch.FloatTensor(sample[1]))\n return torch.stack(imgs, 0), targets_1", "def batch_iter(input_data,batch_size):\r\n batch_ids,batch_mask,batch_segment,batch_label=[],[],[],[]\r\n for features in input_data:\r\n if len(batch_ids) == batch_size:\r\n yield batch_ids,batch_mask,batch_segment,batch_label\r\n batch_ids, batch_mask, batch_segment, batch_label = [], [], [], []\r\n\r\n batch_ids.append(features['input_ids'])\r\n batch_mask.append(features['input_mask'])\r\n batch_segment.append(features['segment_ids'])\r\n batch_label.append(features['label_ids'])\r\n\r\n if len(batch_ids) != 0:\r\n yield batch_ids, batch_mask, batch_segment, batch_label", "def collect_fn_local(batch):\r\n max_detection = max(list(map(lambda x: len(x[4]), batch)))\r\n for i in range(len(batch)):\r\n batch[i] = list(batch[i]) # because the element in the batch is a tuple\r\n dummy = torch.zeros((1,128,64), dtype=batch[i][4][0].dtype)\r\n temp = batch[i][4]\r\n # make the detection to the same length in order to stack the\r\n while temp.size(0) < max_detection:\r\n # while len(temp) < max_detection:\r\n temp = torch.cat((temp, dummy))\r\n # temp.append(dummy)\r\n batch[i][4] = temp\r\n \r\n return default_collate(batch)", "def batch(self, data, size):\n\n return [data[x : x + size] for x in range(0, len(data), size)]", "def set_batch_data():\r\n if not os.path.exists(filepath):\r\n download_data()\r\n for n in range(0,6):\r\n d = read(filepath + flist[n])\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, trts = {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n trts['x'], trts['y'] = d['data'], d['labels']\r\n trtsflag = ['train', 'train', 'train', 'train', 'train', 'test']\r\n\r\n data['flag'] = trtsflag[n]\r\n data[trtsflag[n]] = trts\r\n save_pkl(data, savename=flist[n]+'.pkl')", "def batch_data(images: np.ndarray, targets: np.ndarray, batch_size: int=100) \\\n -> 
Iterable[Tuple[np.ndarray, np.ndarray]]:", "def __getitem__(self, index):\n # Generate indexes of the batch\n rows = self.metadata_dataframe.iloc[index * self.batch_size:(index + 1) * self.batch_size]\n names = rows['Name']\n\n rng = range(index * self.batch_size, (index + 1) * self.batch_size)\n img_files_temp = [names[k] for k in rng]\n # create batch item list\n img_batch_list = []\n meta_batch_list = []\n y_batch_list = []\n for img_file in img_files_temp:\n # Generate data\n print(\"IMAGE FILE:(\")\n print(img_file)\n img, meta, y = self.__data_generation(img_file)\n img_batch_list.append(img)\n meta_batch_list.append(meta)\n y_batch_list.append(y)\n\n # batch_inputs = (img_batch_list, meta_batch_list)\n # return batch_inputs #, y_batch_list\n return [np.array(img),np.array(meta_batch_list)], np.array(y_batch_list)", "def __data_generation(self, batch_data):\n X = np.zeros((self.batch_size, self.num_features), dtype=float)\n y = np.zeros((self.batch_size, self.num_outputs), dtype=float)\n\n for i, sample in batch_data.iterrows():\n # Get lat/long of pickup and dropoff locations\n PULocation = self.taxizone_data.loc[sample['PULocationID']].centroids\n PULocationLong, PULocationLat = PULocation.x, PULocation.y\n DOLocation = self.taxizone_data.loc[sample['DOLocationID']].centroids\n DOLocationLong, DOLocationLat = DOLocation.x, DOLocation.y\n\n # Get month date, day of week and hours/mins for pickup\n PUDateTime = datetime.strptime(sample.tpep_pickup_datetime, '%Y-%m-%d %H:%M:%S')\n PUDate = PUDateTime.strftime('%Y-%m-%d')\n PUYear, PUMonth, PUMonthDate = PUDate.split('-')\n # TODO - Add this to pre-processing of trip data! Some random months in the data!!\n if PUYear != '2018' or PUMonth != '06':\n continue\n PUDayOfWeek = PUDateTime.weekday()\n PUTimeHour, PUTimeMinute = datetime.strptime(\n sample.tpep_pickup_datetime, '%Y-%m-%d %H:%M:%S'\n ).strftime('%H:%M').split(':')\n\n # Get precipitation for that day\n Precipitation = self.weather_data[self.weather_data['DATE'] == PUDate]['PRCP'].values[0]\n\n X[i] = np.concatenate((np.array([\n\n PULocationLat,\n PULocationLong,\n DOLocationLat,\n DOLocationLong,\n abs((PULocationLat - DOLocationLat) ** 2 + abs(PULocationLong - DOLocationLong) ** 2) ** 0.5,\n Precipitation\n ]),\n to_categorical(PUDayOfWeek, 7),\n to_categorical(PUMonthDate, 31),\n to_categorical(PUTimeHour, 24)\n ))\n\n y[i] = [sample['duration']] if self.generator_type == 'duration' \\\n else [sample['total_amount'] - sample['tip_amount']]\n\n return X, y", "def ingest_many(self, data):\n raise NotImplementedError()", "def data_iter(data, vocab, batch_size, shuffle=True, cuda=False):\n\n buckets = defaultdict(list)\n for pair in data:\n src_sent = pair[0]\n buckets[len(src_sent)].append(pair)\n\n batched_data = []\n for src_len in buckets:\n tuples = buckets[src_len]\n if shuffle: np.random.shuffle(tuples)\n batched_data.extend(list(HNCMDataLoader.batch_slice(tuples, batch_size)))\n\n if shuffle:\n np.random.shuffle(batched_data)\n for src_sents, trg_sents, fact_sents in batched_data:\n num_trg_word = sum(len(s[:-1]) for s in trg_sents)\n src_lengths = [len(s) for s in src_sents]\n src_seqs_var = to_input_var(src_sents, vocab.src, cuda)\n trg_seqs_var = to_input_var(trg_sents, vocab.trg, cuda)\n fact_lengths = [[len (s) for s in fact_sent] for fact_sent in fact_sents]\n fact_seqs_var = to_input_var_2d(fact_sents, vocab.src, cuda)\n\n yield {\n 'src_seq': src_seqs_var, 'src_lengths': src_lengths,\n 'fact_seq': fact_seqs_var, 'fact_lengths': fact_lengths,\n 'trg_seq': 
trg_seqs_var[:, :-1],\n 'target': trg_seqs_var[:, 1:],\n 'num_trg_word': num_trg_word, 'num_trg_seq': len(trg_sents)\n }", "def _run(self, index_list: List[np.ndarray]) -> Iterator[XData]:\n da_it = task_list(index_list, IdReader(), self.worker, self.nworkers)\n xdata_it = (dataarrays_to_xdata(d, self.meta) for d in da_it)\n return xdata_it", "def _dataset():\n dataset = tf_record_dataset(DatasetName.SIGNUM, DatasetType.TRAIN)\n dataset = dataset.batch(1)\n dataset = dataset.map(transform_for_prediction)\n dataset = dataset.unbatch()\n dataset = dataset.filter(lambda frames, label, signer: tf.math.equal(label, 420) and tf.math.equal(signer, 1))\n dataset = dataset.batch(1)\n return dataset.take(1)", "def get_data(self, t_img_path, v_img_path, t_label_path, v_label_path):\n train_label_names = tf.constant(sorted(os.path.join(t_label_path, name) for name in os.listdir(t_label_path)))\n val_label_names = tf.constant(sorted(os.path.join(v_label_path, name) for name in os.listdir(v_label_path)))\n train_image_names = tf.constant(sorted(os.path.join(t_img_path, name) for name in os.listdir(t_img_path)))\n val_image_names = tf.constant(sorted(os.path.join(v_img_path, name) for name in os.listdir(v_img_path)))\n\n training_dataset = tf.data.Dataset.from_tensor_slices((train_image_names, train_label_names))\n training_dataset = training_dataset.shuffle(buffer_size=50000)\n training_dataset = training_dataset.map(self.dataset_resize_images, num_parallel_calls=4)\n training_dataset = training_dataset.map(\n lambda filename, label: tuple(tf.py_func(self.dataset_convert_labels, [filename, label], [tf.float32, tf.float32], stateful=False)),\n num_parallel_calls=4)\n training_dataset = training_dataset.prefetch(self.batch_size)\n training_dataset = training_dataset.batch(self.batch_size)\n training_dataset = training_dataset.repeat()\n\n val_dataset = tf.data.Dataset.from_tensor_slices((val_image_names, val_label_names))\n val_dataset = val_dataset.shuffle(buffer_size=5000)\n val_dataset = val_dataset.map(self.dataset_resize_images, num_parallel_calls=4)\n val_dataset = val_dataset.map(\n lambda filename, label: tuple(tf.py_func(self.dataset_convert_labels, [filename, label], [tf.float32, tf.float32], stateful=False)),\n num_parallel_calls=4)\n val_dataset = val_dataset.prefetch(self.batch_size)\n val_dataset = val_dataset.batch(self.batch_size)\n val_dataset = val_dataset.repeat()\n\n handle = tf.placeholder(tf.string, shape=[])\n iterator = tf.data.Iterator.from_string_handle(handle, training_dataset.output_types, training_dataset.output_shapes)\n images, labels = iterator.get_next()\n\n training_iterator = training_dataset.make_one_shot_iterator()\n validation_iterator = val_dataset.make_one_shot_iterator()\n\n return handle, training_iterator, validation_iterator, images, labels", "def __data_generation(self, list_IDs_temp):\n # Initialization\n if self.mirror:\n X = np.empty(\n (self.batch_size, *self.dim_in, self.n_channels_in),\n dtype=\"uint8\",\n ) \n else:\n X = np.empty(\n (self.batch_size * len(self.camnames[0]), *self.dim_in, self.n_channels_in),\n dtype=\"uint8\",\n )\n\n # We'll need to transpose this later such that channels are last,\n # but initializaing the array this ways gives us\n # more flexibility in terms of user-defined array sizes\\\n if self.labelmode == \"prob\":\n y = np.empty(\n (\n self.batch_size * len(self.camnames[0]),\n self.n_channels_out,\n *self.dim_out,\n ),\n dtype=\"float32\",\n )\n else:\n # Just return the targets, without making a meshgrid later\n y = 
np.empty(\n (\n self.batch_size * len(self.camnames[0]),\n self.n_channels_out,\n len(self.dim_out),\n ),\n dtype=\"float32\",\n )\n\n # Generate data\n cnt = 0\n for i, ID in enumerate(list_IDs_temp):\n if \"_\" in ID:\n experimentID = int(ID.split(\"_\")[0])\n else:\n # Then we only have one experiment\n experimentID = 0\n for _ci, camname in enumerate(self.camnames[experimentID]):\n # Store sample\n # TODO(Refactor): This section is tricky to read\n\n if not self.mirror or _ci == 0:\n if self.immode == \"video\":\n X[cnt] = self.load_vid_frame(\n self.labels[ID][\"frames\"][camname],\n camname,\n self.preload,\n self.extension,\n )[\n self.crop_height[0] : self.crop_height[1],\n self.crop_width[0] : self.crop_width[1],\n ]\n elif self.immode == \"tif\":\n X[cnt] = self.load_tif_frame(\n self.labels[ID][\"frames\"][camname], camname\n )[\n self.crop_height[0] : self.crop_height[1],\n self.crop_width[0] : self.crop_width[1],\n ]\n else:\n raise Exception(\"Not a valid image reading mode\")\n\n # Labels will now be the pixel positions of each joint.\n # Here, we convert them to\n # probability maps with a numpy meshgrid operation\n this_y = np.round(self.labels[ID][\"data\"][camname])\n if self.immode == \"video\":\n this_y[0, :] = this_y[0, :] - self.crop_width[0]\n this_y[1, :] = this_y[1, :] - self.crop_height[0]\n else:\n raise Exception(\n \"Unsupported image format. Needs to be video files.\"\n )\n\n # For 2D, this_y should be size (2, 20)\n if this_y.shape[1] != self.n_channels_out:\n # TODO(shape_exception):This should probably be its own\n # class that inherits from base exception\n raise Exception(_EXEP_MSG)\n\n if self.labelmode == \"prob\":\n # Only do this if we actually need the labels --\n # this is too slow otherwise\n (x_coord, y_coord) = np.meshgrid(\n np.arange(self.dim_out[1]), np.arange(self.dim_out[0])\n )\n for j in range(self.n_channels_out):\n # I tested a version of this with numpy broadcasting,\n # and looping was ~100ms seconds faster for making\n # 20 maps\n # In the future, a shortcut might be to \"stamp\" a\n # truncated Gaussian pdf onto the images, centered\n # at the peak\n y[cnt, j] = np.exp(\n -(\n (y_coord - this_y[1, j]) ** 2\n + (x_coord - this_y[0, j]) ** 2\n )\n / (2 * self.out_scale ** 2)\n )\n else:\n y[cnt] = this_y.T\n\n cnt = cnt + 1\n\n # Move channels last\n if self.labelmode == \"prob\":\n y = np.transpose(y, [0, 2, 3, 1])\n\n if self.mirror:\n # separate the batches from the cameras, and use the cameras as the numebr of channels \n # to make a single-shot multi-target prediction from a single image\n y = np.reshape(y, (self.batch_size, len(self.camnames[0]), y.shape[1], y.shape[2]))\n y = np.transpose(y, [0, 2, 3, 1])\n else:\n # One less dimension when not training with probability map targets\n y = np.transpose(y, [0, 2, 1])\n\n if self.downsample > 1:\n X = processing.downsample_batch(X, fac=self.downsample, method=self.dsmode)\n if self.labelmode == \"prob\":\n y = processing.downsample_batch(\n y, fac=self.downsample, method=self.dsmode\n )\n y /= np.max(np.max(y, axis=1), axis=1)[:, np.newaxis, np.newaxis, :]\n\n if self.mono and self.n_channels_in == 3:\n # Go from 3 to 1 channel using RGB conversion. 
This will also\n # work fine if there are just 3 channel grayscale\n X = X[:, :, :, 0]*0.2125 + \\\n X[:, :, :, 1]*0.7154 + \\\n X[:, :, :, 2]*0.0721\n\n X = X[:, :, :, np.newaxis]\n\n if self.mono:\n # Just subtract the mean imagent BGR value, which is as close as we\n # get to vgg19 normalization\n X -= 114.67\n else:\n X = pp_vgg19(X)\n return X, y", "def train(self, batch):\n pass", "def processData(data):\n ids, instances, labels = [], [], []\n for i in data:\n idField = int(i[0])\n instance = i[1:-1]\n label = i[-1]\n ids.append(idField)\n instances.append(instance)\n labels.append(label)\n\n ids = np.array(ids)\n instances = np.array(instances)\n labels = np.array(labels)\n\n return (ids, instances, labels)", "def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r", "def prepare_data(self, context_size, model_name):\n self.context_size = context_size\n data_x = []\n data_y = []\n oob = self.word2idx['OOB']\n\n for item in self.docs:\n data = [oob] * context_size + self.doc2token(item) + [oob] * context_size #padding\n for i in range(context_size, len(data) - context_size):\n data_x.append(data[i - context_size: i] + data[i + 1: i + context_size + 1])\n data_y.append(data[i])\n \n if model_name.lower() == 'skipgram':\n data_x, data_y = data_y, data_x\n self.data_x = Variable(torch.LongTensor(data_x))\n self.data_y = Variable(torch.LongTensor(data_y))\n logging.info(f'data preprocessed, data shape: {self.data_x.shape}, {self.data_y.shape}')", "def batches(data, batch_size) -> list:\n rv = []\n for idx, line in enumerate(data):\n if idx != 0 and idx % batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv", "def __getitem__(self, index):\n # Generate indexes of the batch\n batch_data = self.dataset_df[index * self.batch_size: (index + 1) * self.batch_size]\n\n # Generate data\n X, y = self._data_generation(batch_data)\n\n return X, y", "def _process(self):\n export_collect_data(self.kwargs[\"collect\"])", "def evaluate_batch(self, pipelines):", "def train_datas_debug(self, batch_size):\r\n if not isinstance(batch_size, int):\r\n raise ValueError('In Dataset, batch_size should be int, get '\r\n '{}'.format(type(batch_size)))\r\n if batch_size <= 0:\r\n raise ValueError('In Dataset, batch_size should larger equal to '\r\n '1, get {}'.format(batch_size))\r\n \r\n indices = list(range(batch_size))\r\n \r\n datas = []\r\n # for label, we have box and landmark which is 0.\r\n datas.append([self._train_datas[:batch_size], \r\n self._train_labels[:batch_size]])\r\n return datas", "def split_to_batches(self, train_data, batch_size):\n num_of_training_examples = len(train_data)\n for i in range(0, num_of_training_examples, batch_size):\n x, y = zip(*train_data[i: i+batch_size])\n yield np.vstack(x), np.vstack(y)", "def datasubset(loader, start, count, batch_size):\n # Note: start is the start index of batch, not image\n smaller_dataset = []\n end_idx = count / batch_size\n for batch_idx, (orig_images, labels) in enumerate(loader):\n if start <= batch_idx < end_idx:\n smaller_dataset.append((orig_images, labels))\n if batch_idx > end_idx:\n break\n return smaller_dataset", "def collect_data(self):\n categories = []\n list_of_feature_lists = []\n 
feature_sets = set()\n with open(self.csv_path, \"r\") as f:\n reader = csv.DictReader(f)\n # collecting some stats for FDR calculation:\n self.PSM_count = 0\n self.decoy_count = 0\n\n if self[\"dump_svm_matrix\"]:\n self.init_svm_matrix_dump()\n additional_matrix_info = []\n\n for i, row in enumerate(\n sorted(\n reader,\n reverse=self[\"bigger_scores_better\"],\n key=lambda d: float(d[self.col_for_sorting]),\n )\n ):\n\n features = self.row_to_features(row)\n\n if tuple(features) in feature_sets:\n continue\n feature_sets.add(tuple(features))\n\n category, psm_FDR = self.get_psm_category(row)\n\n list_of_feature_lists.append(features)\n categories.append(category)\n\n if self[\"dump_svm_matrix\"]:\n label = -1 if row_is_decoy(row) else 1\n sequence = \"{0}.{1}#{2}.{3}\".format(\n row[\"Sequence Pre AA\"].strip(),\n row[\"Sequence\"].strip(),\n row[\"Modifications\"].strip(),\n row[\"Sequence Post AA\"].strip(),\n )\n additional_matrix_info.append(\n {\n \"psm_id\": row[\"Spectrum Title\"].strip(),\n \"label\": label,\n \"scannr\": row[\"Spectrum Title\"].strip().split(\".\")[-2],\n \"peptide\": sequence,\n \"proteins\": self.parse_protein_ids(row[\"Protein ID\"]),\n }\n )\n\n if i % 1000 == 0:\n score_val = float(row[self.col_for_sorting])\n msg = (\n \"Generating feature matrix from input csv \"\n \"(line ~{0}) with score {1} and FDR \"\n \"{2}\".format(i, score_val, psm_FDR)\n )\n print(msg, end=\"\\r\")\n\n # All data points are collected in one big matrix, to make standardization possible\n print(\"\\nConverting feature matrix to NumPy array...\")\n X_raw = np.array(list_of_feature_lists, dtype=float)\n\n print(\"Replacing empty/NaN values with the mean of each column...\")\n self.nan_replacer = Imputer()\n self.nan_replacer.fit(X_raw)\n X_raw = self.nan_replacer.transform(X_raw)\n # Standardize input matrix to ease machine learning! 
Scaled data has zero mean and unit variance\n print(\"Standardizing input matrix...\")\n self.scaler = SCALER.fit(X_raw)\n self.X = self.scaler.transform(X_raw)\n self.categories = np.array(categories)\n print()\n\n if self[\"dump_svm_matrix\"]:\n print(\"Dumping SVM matrix to\", self[\"dump_svm_matrix\"])\n\n for i, matrix_row in enumerate(self.X):\n matrix_row_info = additional_matrix_info[i]\n self.dump_svm_matrix_row(\n row=list(matrix_row),\n psm_id=matrix_row_info[\"psm_id\"],\n label=matrix_row_info[\"label\"],\n scannr=matrix_row_info[\"scannr\"],\n peptide=matrix_row_info[\"peptide\"],\n proteins=matrix_row_info[\"proteins\"],\n )\n\n print(\"Dumped SVM matrix to\", self[\"dump_svm_matrix\"])\n return", "def __getitem__(self, index):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n\n return X, y", "def __getitem__(self, index):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n\n return X, y", "def _gather_data(self):\n for data in self._collection:\n label = data.label\n label = disambiguate(label, self._data)\n self._data[label] = data", "def get_batches(self, batch_size):\n if self.data.shape[0] % batch_size != 0:\n raise RuntimeError('num of data tuples is not a multiple of batch size')\n num_batch = self.data.shape[0] // batch_size\n for b in range(num_batch):\n yield self.data[b*batch_size:(b+1)*batch_size, :], \\\n self.target[b*batch_size:(b+1)*batch_size, :]", "def process_state_batch(self, batch):\n # batch = np.squeeze(batch, axis=1)\n batch = np.array([np.concatenate(obs, axis=-1) for obs in batch])\n return batch", "def __getitem__(self, index):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n # Generate data\n X, Y = self.__data_generation(list_IDs_temp)\n\n return X, Y", "def batches(self, batch_size): \n if self.shuffle:\n idx = np.arange(len(dataset.train_x))\n np.random.shuffle(idx)\n self.train_x = self.train_x[idx]\n \n n_batches = len(self.train_x) // batch_size\n for ii in range(0, len(self.train_x), batch_size):\n x = self.train_x[ii:ii+batch_size]\n \n yield self.scaler(x)", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il", "def __data_generation(self, batch_indices):\n\n X = self.__get_npy_arrays(batch_indices)\n y = self.__get_records(batch_indices)\n\n return X, y", "def run(self):\r\n self.collect_data()", "def _collect_all(self):", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n\n if os.path.isdir(filenames):\n num_records = len(os.listdir(filenames))\n print(\"Loading from directory. 
\" + str(num_records) + \" tfRecords found.\")\n files = tf.data.Dataset.list_files(filenames + \"/\" + \"*.tfrecord\").shuffle(num_records)\n dataset = files.apply(\n tf.contrib.data.parallel_interleave(\n lambda x: tf.data.TFRecordDataset(x, num_parallel_reads=256, buffer_size=8*1024*1024),\n cycle_length=32, sloppy=True)\n )\n else:\n print(\"Loading from single tfRecord...\")\n dataset = tf.data.TFRecordDataset(filenames + \".tfrecord\").repeat()\n \n dataset = dataset.map(self.parser, num_parallel_calls=128)\n \n if self.subset == 'train':\n min_queue_examples = int(\n Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n \n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(10)\n \n iterator = dataset.make_one_shot_iterator()\n seq_batch, input_batch, map_batch, transformation_batch = iterator.get_next()\n\n return seq_batch, input_batch, map_batch, transformation_batch", "def individual_collate(batch):\n\n data = batch\n\n collected_data = defaultdict(list)\n\n for i in range(len(list(data))):\n for k in data[i].keys():\n collected_data[k].append(data[i][k])\n\n for k in collected_data.keys():\n collected_data[k] = torch.stack(collected_data[k])\n\n return collected_data", "def __getitem__(self, index):\n\n # X : (n_samples, v_size, v_size, v_size, n_channels)\n # Initialization\n tmp_indeces = self.index_list[\n index*self.batch_size: (index+1)*self.batch_size]\n\n X = []\n IDs = []\n if self.has_labels:\n y = []\n else:\n y = None\n\n # Generate data\n for _i, load_index in enumerate(tmp_indeces):\n image = np.copy(self.data[load_index][DATA_IMAGE_INDEX])\n IDs.append(self.data[load_index][DATA_ID_INDEX])\n if self.DEBUG:\n print('load index: {}'.format(load_index))\n print('loaded image_shape: {}'.format(image.shape))\n\n # generator can also be used with test set\n if self.has_labels:\n label = np.copy(self.data[load_index][DATA_LABEL_INDEX:])\n else:\n label = None\n if self.DEBUG:\n print('label:')\n print(label.shape)\n\n # perform augmentation\n for func in self.augmentation_functions:\n if self.DEBUG:\n print('func: {}'.format(func))\n try:\n image, label = func(image, label)\n except ValueError as Error:\n print(Error)\n print('-'*15)\n raise ValueError(\n 'Error in function {} for input {} and {}'.format(\n func, image, label\n )\n )\n if self.DEBUG:\n print('image_shape after aug: {}'.format(image.shape))\n print('label_shape after aug: {}'.format(label.shape))\n print(np.max(label))\n\n if image.ndim == 2:\n image = image[..., np.newaxis]\n\n X.append(image)\n if label is not None:\n if label.ndim == 2:\n label = label[:, :, np.newaxis]\n y.append(label)\n\n X = self._to_stack(X)\n if y is not None:\n y = self._to_stack(y)\n\n if self.batch_size == 1:\n IDs = IDs[0]\n\n if self.DEBUG:\n print('X: {}'.format(X))\n\n if self.return_ID:\n return X, y, IDs\n else:\n return X, y", "def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = 
self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch", "def _val_pipeline(self, ds_images, ds_labels):\n \n ds_zip = tf.data.Dataset.zip((ds_images, ds_labels))\n if self.val_count != 0:\n ds = (ds_zip.repeat(count=self.n_epochs)\n .batch(self.val_count)\n .prefetch(3))\n\n return ds", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def __data_generation(self, list_IDs_temp):\n X1 = np.empty((len(list_IDs_temp), *self.dim, self.n_channels))\n X2 = np.empty((len(list_IDs_temp), *self.dim, self.n_channels))\n X3 = np.empty((len(list_IDs_temp), *self.dim, self.n_channels))\n X4 = np.empty((len(list_IDs_temp), *self.dim, self.n_channels))\n y = np.empty(len(list_IDs_temp), dtype=int)\n\n for i, ID_instance in enumerate(list_IDs_temp):\n\n with (self.path_instances / ID_instance).open('rb') as input:\n instance = pickle.load(input)\n\n frames = instance['frames']\n\n label = instance['crossing']\n\n #Normalización de los frames\n if self.normalized:\n frames = frames * 1 / 255\n\n X1[i, ] = frames[0]\n X2[i, ] = frames[1]\n X3[i, ] = frames[2]\n X4[i, ] = frames[3]\n y[i] = label\n\n return [X1, X2, X3, X4], to_categorical(y, num_classes=self.n_classes)", "def trainDataGenerator(num_epochs):\r\n samples, all_files = get_filenames()\r\n for num in range(num_epochs):\r\n for i in range(len(samples)):\r\n sample = samples[i]\r\n for file in all_files[i]:\r\n ohvs, Y = prepData(sample, file)\r\n if (ohvs == []):\r\n continue\r\n X = np.array([ohvs[:800]])\r\n yield X, Y\r\n # for i in range(0, len(ohvs), 400):\r\n # X = np.array([ohvs[i : i+400]])\r\n # print(\"\\tX shape =\", X.shape)\r\n # yield X, Y\r", "def ptb_producer(raw_data, batch_size, num_steps, word_to_id):\n x = []\n y = []\n n_batches = len(raw_data) // batch_size\n for sentence in raw_data:\n mask_index = get_mask_index(sentence)\n current_label = sentence[mask_index]\n sentence[mask_index] = word_to_id['<mask>']\n y.append(current_label)\n x.append(sentence)\n x = np.array(x)\n x = x[:n_batches*batch_size]\n x = np.reshape(x, [n_batches, batch_size, num_steps])\n y = np.array(y)\n y = y[:n_batches * batch_size]\n y = np.reshape(y, [n_batches, batch_size])\n return x, y", "def _read(self, documents):\n data = []\n X,Y = [], []\n for document in documents:\n d_ata = pd.read_csv(document, sep='\\t', names=['review','label'])\n data.append(d_ata)\n data = pd.concat(data)\n self.data = data\n Y = data.label\n self.vec.fit(data.review)\n X = self.preprocess(data)\n \n return train_test_split(X,Y)", "def run_batch_filter(self):\n if self.speed_data is None and self.flow_data is None and self.traveltime_data is None:\n print(\n 'Warning: The measurement data must be set before running the batch filter: use function self.set_meas_data()')\n\n # =======================================================================\n # the initial ensembles, which should have been set externally\n X_init = np.matrix(np.zeros((self.dim_state, self.num_ensembles)))\n print(\n 'Setting initial ensembles: rho {0}; qin {1}; qout {2}'.format(self.init_rho, self.init_qin, self.init_qout))\n for ens in range(0, self.num_ensembles):\n X_init[self.x_index['density'][0]:\n self.x_index['density'][self.num_cells - 1], ens] = self.init_rho\n X_init[self.x_index['qin'], ens] = self.init_qin\n X_init[self.x_index['qout'], ens] = self.init_qout\n\n # print('setted qin {0}; qout 
{1}'.format(X_init[self.x_index['qin'], ens], X_init[self.x_index['qout'], ens] ))\n # add noise to each ensemble\n X_init[:, ens] += np.matrix(np.random.multivariate_normal(\n np.zeros(self.dim_state), self.Q)).reshape((self.dim_state, 1))\n\n self.set_initial_ensembles(X_init)\n\n # =======================================================================\n # DEBUG\n # save the qin and qout in the corresponding probe data\n # save the initial state\n if self.__debug:\n self.qin_f.append(np.squeeze(np.array(self.X_f[self.x_index['qin'], :])).tolist())\n self.qin_a.append(np.squeeze(np.array(self.X_a[self.x_index['qin'], :])).tolist())\n self.qin_obs.append(np.nan)\n\n self.qout_f.append(np.squeeze(np.array(self.X_f[self.x_index['qout'], :])).tolist())\n self.qout_a.append(np.squeeze(np.array(self.X_a[self.x_index['qout'], :])).tolist())\n self.qout_obs.append(np.nan)\n\n # The enKF runs at the finest time grid\n # for each step, update the system\n for step in range(0, self.num_steps):\n\n # update status\n sys.stdout.write('\\r')\n sys.stdout.write('Status: filtering step {0}/{1}'.format(step, self.num_steps))\n sys.stdout.flush()\n # print('Status: filtering step {0}'.format(step))\n\n cur_time = (step + 1) * self.dur_steps\n\n # get the effective measurement\n eff_flow, eff_speed, eff_traveltime = self.__get_eff_meas(cur_time)\n\n # build the observation index\n self.y_index, self.dim_obs, y_obs, cov_noise = self.__build_obs_index(eff_flow, eff_speed, eff_traveltime)\n\n # update the estimate for this step\n est_state = self.update_estimate(y_obs, cov_noise, cur_time)\n\n # =======================================================================\n # DEBUG\n # save the qin and qout in the corresponding probe data\n # save the updated state\n if self.__debug:\n self.qin_f.append(np.squeeze(np.array(self.X_f[self.x_index['qin'], :])).tolist())\n self.qin_a.append(np.squeeze(np.array(self.X_a[self.x_index['qin'], :])).tolist())\n if 'flow' in self.y_index.keys() and self.__debug_entrance_sensor in self.y_index['flow'].keys():\n self.qin_obs.append(y_obs[self.y_index['flow'][self.__debug_entrance_sensor]])\n # print('y_index[flow]:{0}'.format(self.y_index['flow'].keys()))\n # print('y_obs[ y_index[flow][entrance] ]:{0}'.format(\n # y_obs[ self.y_index['flow'][self.__debug_entrance_sensor]],\n # self.__debug_entrance_sensor))\n else:\n self.qin_obs.append(np.nan)\n\n self.qout_f.append(np.squeeze(np.array(self.X_f[self.x_index['qout'], :])).tolist())\n self.qout_a.append(np.squeeze(np.array(self.X_a[self.x_index['qout'], :])).tolist())\n if 'flow' in self.y_index.keys() and self.__debug_exit_sensor in self.y_index['flow'].keys():\n self.qout_obs.append(y_obs[self.y_index['flow'][self.__debug_exit_sensor]])\n else:\n self.qout_obs.append(np.nan)\n # =======================================================================\n # save the estimated state\n self.est_state_all[:, step] = est_state\n\n # decouple and save into self.est_density, self.est_speed, self.est_queue, self.est_traveltime\n self.est_density[:, step] = est_state[0:self.num_cells, 0]\n\n # the speed is computed using the fundamental diagram\n for cell_id in range(0, self.num_cells):\n # use the static FD at this step\n self.est_speed[cell_id, step] = self.__rho2v(self.vm_cells[cell_id, 0], self.beta_cells[cell_id, 0],\n self.rhoc_cells[cell_id, 0], self.wc_cells[cell_id, 0],\n self.est_density[cell_id, step])\n\n # REMARK: the queue and travel time a post-processed from the speed field.\n # They are computed in cross_evaluation 
class for all algorithms\n # the queue length starts from the first cell with speed below queue_threshold to the end of road\n # index = (self.est_speed[:, step] <= self.queue_threshold)\n #\n # # filter out the outliers\n # index_smoothed = deepcopy(index)\n # outlier_max = 3\n # counter = 0\n # for i in range(0, len(index)):\n #\n # if index[i] == True:\n # # trigger the coutner\n # counter += 1\n # elif index[i] == False and counter != 0:\n # if counter <= outlier_max:\n # # found outliers\n # index_smoothed[ i-counter : i ] = False\n # # reset counter\n # counter = 0\n #\n # # if i != 0 and i != len(index)-1:\n # # if sum( index[i-1:i+3] ) >=2:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n # # elif i == 0:\n # # if sum(index[0: 5] ) >= 3:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n # # elif i == len(index)-1:\n # # if sum(index[ i-4 :len(index)]) >= 3:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n #\n # if sum(index_smoothed) <= 3: # use 4 to suppress false alarms\n # # if less or equal then 2 cells are in congestion, it may be caused by noise.\n # self.est_queue[step] = 0\n # else:\n # # if step > 105 and step < 115:\n # # print(sum(index_smoothed))\n # # print(index_smoothed)\n # # print(index)\n #\n # self.est_queue[step] = \\\n # self.len_cells*( self.num_cells - np.argmax(index_smoothed) )\n # # try:\n # # first_cong_cell_id = [x[0] for x in enumerate( self.est_speed[:,step] ) if x[1] < self.queue_threshold][0]\n # # except IndexError:\n # # # no congested cell\n # # first_cong_cell_id = self.num_cells\n # # # the estimated queue length\n # # self.est_queue[step] = self.len_cells*( self.num_cells - first_cong_cell_id )\n #\n # # the travel time estimate is computed by summing up the travel time in each cell\n # self.est_traveltime[step] = np.sum(self.len_cells/self.est_speed[:,step])\n\n\n # =======================================================================\n # DEBUG\n # plot the update\n if self.__debug:\n plot_len = 19\n # qin\n if False:\n if not np.isnan(self.qin_obs[-1]):\n fig1 = plt.figure(figsize=(10, 5), dpi=100)\n ax1 = fig1.add_subplot(111)\n positions_f = np.arange(0, len(self.qin_f)) - 0.1\n positions_a = np.arange(0, len(self.qin_a)) + 0.1\n positions_obs = np.arange(0, len(self.qin_obs))\n # predicted as red\n bp = ax1.boxplot(self.qin_f[-plot_len:],\n positions=positions_f[-plot_len:], widths=0.15,\n patch_artist=False)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#FF4633', linewidth=1)\n # change fill color\n # box.set( facecolor = '#FF4633' )\n # corrected as green\n bp = ax1.boxplot(self.qin_a[-plot_len:],\n positions=positions_a[-plot_len:], widths=0.15, patch_artist=False)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#07891B', linewidth=1)\n # change fill color\n # box.set( facecolor = '#07891B' )\n # measurement as blue\n ax1.scatter(positions_obs[-plot_len:], self.qin_obs[-plot_len:], color='b', marker='o', s=40,\n label='Observation')\n ax1.set_title('qin')\n # x_ticks = np.arange(0, len(self.qin_f))\n # ax1.set_xticks(x_ticks[-plot_len:])\n plt.show()\n\n # qout\n if False:\n if not np.isnan(self.qout_obs[-1]):\n fig2 = plt.figure(figsize=(10, 5), dpi=100)\n ax2 = fig2.add_subplot(111)\n positions_f = np.arange(0, len(self.qout_f)) - 0.1\n positions_a = np.arange(0, len(self.qout_a)) + 0.1\n positions_obs = np.arange(0, len(self.qout_obs))\n # predicted as red\n bp = ax2.boxplot(self.qout_f[-plot_len:], 
positions=positions_f[-plot_len:], widths=0.18,\n patch_artist=True)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#7570b3', linewidth=1)\n # change fill color\n box.set(facecolor='#FF4633')\n # corrected as green\n bp = ax2.boxplot(self.qout_a[-plot_len:], positions=positions_a[-plot_len:], widths=0.18,\n patch_artist=True)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#7570b3', linewidth=1)\n # change fill color\n box.set(facecolor='#07891B')\n # measurement as blue\n ax2.scatter(positions_obs[-plot_len:], self.qout_obs[-plot_len:], color='b', marker='o', s=30,\n label='Observation')\n ax2.set_title('qout')\n # x_ticks = np.arange(0, len(self.qout_f))\n # ax2.set_xticks(x_ticks[-plot_len:])\n\n plt.show()\n\n # plot the estimated qin and qout\n if self.__debug:\n if True:\n qin = np.squeeze(np.array(self.est_state_all[self.x_index['qin'], :]))\n qin_meas = np.array(self.qin_obs)[1:]\n print(len(qin), len(qin_meas))\n fig1 = plt.figure(figsize=(10, 5), dpi=100)\n ax1 = fig1.add_subplot(111)\n t = np.arange(len(qin))\n ax1.plot(t, qin, 'r-', label='Estimated')\n not_nan = ~np.isnan(qin_meas)\n ax1.plot(t[not_nan], qin_meas[not_nan], 'b', label='Measured')\n ax1.legend()\n ax1.grid(True)\n ax1.set_title('qin')\n\n plt.draw()\n\n if True:\n qout = np.squeeze(np.array(self.est_state_all[self.x_index['qout'], :]))\n qout_meas = np.array(self.qout_obs)[1:]\n fig2 = plt.figure(figsize=(10, 5), dpi=100)\n ax2 = fig2.add_subplot(111)\n t = np.arange(len(qout))\n ax2.plot(t, qout, 'r-', label='Estimated')\n not_nan = ~np.isnan(qout_meas)\n ax2.plot(t[not_nan], qout_meas[not_nan], 'b', label='Measured')\n ax2.set_title('qout')\n ax2.legend()\n ax2.grid(True)\n plt.draw()", "def __getitem__(self, index):\n # get the indexs of each batch\n batch_indexs = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n # using batch_indexs to get path of current batch\n batch_path = [self.X_path[k] for k in batch_indexs]\n # get batch data\n batch_x, batch_y = self.data_generation(batch_path)\n return batch_x, batch_y", "def transform(self, chunks):\n data = np.array([chunk.flatten() for chunk in chunks])\n\n return data", "def batch_data(source, target, batch_size):\n for batch_i in range(0, len(source)//batch_size):\n start_i = batch_i * batch_size\n source_batch = source[start_i:start_i + batch_size]\n target_batch = target[start_i:start_i + batch_size]\n yield np.array(pad_sentence_batch(source_batch)), np.array(pad_sentence_batch(target_batch))", "def process_batch_static(batch, subset_size, tokeniser, model, idx2word, word2uniquevec):\r\n ## initialisation\r\n new_token_ids = []\r\n word2manyvec = {}\r\n streaming_batch_size = len(batch)\r\n\r\n start = time.time()\r\n\r\n ## processing the batch one subset at a time\r\n for s in range(0, streaming_batch_size, subset_size):\r\n print(\"Processing subset \" + str(s // subset_size + 1))\r\n if s + subset_size < len(batch):\r\n batch_subset = batch[s:s + subset_size]\r\n else:\r\n batch_subset = batch[s:]\r\n # iteratively update idx2word, new_token_ids, word2manyvec throughout the batch\r\n idx2word, new_token_ids, word2manyvec = process_subset_static(batch_subset, tokenizer, model,\r\n idx2word, new_token_ids,\r\n word2uniquevec, word2manyvec, selected_layer=1)\r\n print(\"Number of word vectors so far: \" + str(len(idx2word)))\r\n\r\n # for every new word discover, find unique vector representation by taking first PCA prinicipal component\r\n for word, veclist in word2manyvec.items():\r\n if len(veclist) 
== 1: # otherwise we would just get the first standard unit vector from PCA\r\n word2uniquevec[word] = torch.Tensor(veclist[0])\r\n else:\r\n pca = PCA(n_components=1)\r\n pca.fit(veclist)\r\n word2uniquevec[word] = torch.Tensor(pca.components_[0])\r\n\r\n end = time.time()\r\n print(\"Total time: \" + str(round(end - start, 2)) + \" s.\")\r\n\r\n return word2uniquevec, idx2word, new_token_ids", "def process_batch(self, data):\n [embedding_batch] = self._sess.run([self._embedding_tensor],\n feed_dict={self._features_tensor: data})\n return embedding_batch", "def on_batch(self, x, y):", "def _process_data(rdd_entry, feature_list):\n events = []\n for event in rdd_entry:\n events.append(event[RDD_EVENT])\n return IptablesIngestor.vectorize_events(events, feature_list)", "def __getitem__(self, index):\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n return X, y", "def __getitem__(self, index):\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n return X, y", "def process(self, data_batch: Any, predictions: Sequence[dict]) -> None:\n self.results.extend(_to_cpu(predictions))", "def _train_batch(self):\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n result = self._batch_iter(source, target, i)\n\n # yield\n yield result", "def get_batch(X, Y, iteration):\n offset = 100\n start = iteration * offset % len(Y)\n \n # YOUR CODE HERE\n # This will return the entire data set each iteration. This is costly, so\n # you should experiment with different way of changing this:\n return X[start: start + offset], Y[start: start + offset]", "def collect_datset(self):\n response = requests.get(self.url)\n lines = response.text.splitlines()\n data = []\n for item in lines:\n item = item.split(\",\")\n data.append(item)\n data.pop(0) # to remove labels from list\n dataset = np.matrix(data)\n return dataset", "def _get_item(self, index):\n data, label = self.data[index], self.label[index]\n coordmax = np.max(data, axis=0)\n coordmin = np.min(data, axis=0)\n nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/1.5).astype(np.int32)\n nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/1.5).astype(np.int32)\n batch_data, batch_label = [], []\n for i in range(nsubvolume_x):\n for j in range(nsubvolume_y):\n curmin = coordmin + [i*1.5, j*1.5, 0]\n curmax = coordmin+ [(i+1)*1.5, (j+1)*1.5, coordmax[2]-coordmin[2]]\n crop_ids = np.sum((data>=(curmin-0.2)) * (data<=(curmax+0.2)), axis=1) == 3\n if sum(crop_ids) == 0: continue\n crop_data = data[crop_ids]\n crop_label = label[crop_ids]\n mask = np.sum((crop_data>=(curmin-0.001)) * (crop_data<=(curmax+0.001)), axis=1) == 3\n ids = np.random.choice(crop_label.size, self.npoints, replace=True)\n this_data = crop_data[ids]\n this_label = crop_label[ids]\n this_mask = mask[ids]\n if sum(this_mask) * 1. 
/ this_mask.size < 0.01: continue\n this_label *= this_mask\n if self.normalize:\n this_data = utils.normalize_point_cloud(this_data)\n batch_data.append(this_data[None,:,:])\n batch_label.append(this_label[None,:])\n batch_data = np.concatenate(tuple(batch_data), axis=0)\n batch_label = np.concatenate(tuple(batch_label), axis=0)\n return batch_data, batch_label", "def run(self):\r\n for pipe in self.inputs:\r\n for row in pipe.rows():\r\n self.put(row)", "def next(self):\n\n if self.i_sample < self.n_sample:\n df_batch = self.grouped[self.i_sample:min(self.n_sample, self.i_sample + self.batch_size)]\n # at end of epoch, number of sample remains may be smaller than batch size\n if len(df_batch) < self.batch_size:\n df_sample = random.sample(self.grouped, self.batch_size-len(df_batch))\n df_batch = df_batch + df_sample\n try:\n assert len(df_batch) == self.batch_size\n except AssertionError:\n print(self.i_sample, df_sample, df_batch)\n\n # get random frame_idxs\n if self.train:\n flips = np.random.choice(a=[False, True], size=(self.batch_size,), p=[0.5, 0.5])\n else:\n flips = np.zeros(self.batch_size, dtype=bool)\n\n\n video = sample_clips(df_batch, flips, self.batch_size, self.n_frame,\n self.scale_w, self.scale_h, self.sample_half_time, self.train)\n\n bboxes = np.zeros((self.batch_size, self.n_frame // self.temporal_scale, self.n_bbox, 5))\n labels = np.zeros((self.batch_size, self.n_bbox, self.num_class))\n for i in range(len(df_batch)):\n tmp_bbox, tmp_label = self.get_bbox_and_label(df_batch[i], flips[i], i, self.scale_w, self.scale_h)\n bboxes[i] = tmp_bbox\n labels[i] = tmp_label\n\n if self.debug_dataloader:\n with open('dataset/AVA_v2.1/ava_action_list_v2.1.pbtxt') as fd:\n lines = fd.readlines()\n\n labels_info = []\n for i in range(80):\n name_line = lines[i * 5 + 1]\n label_id_line = lines[i * 5 + 2]\n label_type_line = lines[i * 5 + 3]\n\n name = name_line[name_line.find('\"') + 1:name_line.rfind('\"')]\n label_id = int(label_id_line.strip().split(':')[1].strip())\n label_type = label_type_line.strip().split(':')[1].strip()\n\n assert label_id == i + 1\n labels_info.append({\n 'name': name,\n 'label_type': label_type\n })\n\n for bidx in range(self.batch_size):\n s_video = video[bidx, ...]\n s_bboxes = bboxes[bidx, ...]\n s_labels = labels[bidx, ...]\n\n window_name = 'batch_idx_'+str(bidx)\n if self.train:\n window_name += '_train'\n else:\n window_name += '_val'\n\n\n bbox = s_bboxes[0, 0, 1:].astype(np.int32)\n label_indices = np.where(s_labels[0, :])[0]\n\n for fidx in range(self.n_frame):\n # print('fidx', fidx)\n save_name = window_name + '_' + str(fidx)\n tmp_img = (s_video[:, fidx, :, :].transpose((1,2,0))).astype(np.uint8).copy()\n\n cv2.rectangle(tmp_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=(0,0,255), thickness=2)\n for en_idx, label_index in enumerate(label_indices):\n # print('label_index', label_index, 'len', len(labels_info))\n cv2.putText(tmp_img, labels_info[label_index]['name'], (bbox[0], bbox[1] + en_idx * 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, color=(0, 255, 0), thickness=1)\n\n cv2.imwrite(save_name+'.jpg', tmp_img)\n\n\n #print(video.shape, bboxes.shape, labels.shape)\n ret = mx.io.DataBatch(data=[mx.nd.array(video), mx.nd.array(bboxes)],\n label=[mx.nd.array(labels),],\n provide_data=self.provide_data,\n provide_label=self.provide_label)\n\n self.i_sample += self.batch_size\n return ret\n else:\n raise StopIteration", "def predict_collect(self, src, collector): # real signature unknown; restored from __doc__\n pass", "def execute_collect(prog, 
_input):\n gen = execute(prog, _input)\n out = []\n for output in gen:\n out.append(output)\n return out", "def _data(self) -> List[Data]:\n data = []\n sample_ids, repeated_sample_ids = set(), set()\n for datum in self.collection.data.filter(\n type=self.process_type,\n status=\"OK\",\n ordering=\"-created\",\n fields=self.DATA_FIELDS,\n ):\n # 1 Filter by newest datum in the sample\n if datum.sample.id in sample_ids:\n repeated_sample_ids.add(datum.sample.id)\n continue\n\n # 2 Filter by genes, if geneset is given\n if self.geneset:\n obj_geneset = self._get_obj_geneset(datum)\n if not self.geneset.issubset(obj_geneset):\n warnings.warn(\n f\"Sample {datum.sample} (Data {datum.id}) does not \"\n \"contain the genes requested in geneset input.\"\n )\n continue\n\n sample_ids.add(datum.sample.id)\n data.append(datum)\n\n if repeated_sample_ids:\n repeated = \", \".join(map(str, repeated_sample_ids))\n warnings.warn(\n f\"The following samples have multiple data of type {self.process_type}: \"\n f\"{repeated}. Using only the newest data of this sample.\",\n UserWarning,\n )\n\n if not data:\n raise ValueError(\n f\"Collection {self.collection.name} has no {self.process_type} \"\n \"data or there is no data with the requested mutations.\"\n )\n\n return data", "def make_vector_batches(data, nbatches, batch_size=None):\n print \"---->\\n.....Putting into vector-shaped batches\"\n if batch_size==None:\n batch_size = int(data['images'].shape[0]/nbatches)\n else:\n assert nbatches * batch_size <= data['images'].shape[0]\n permut = permutation(data['images'].shape[0])\n xdata = []\n ydata = []\n for i in range(nbatches):\n xs = data['images'][permut[i * batch_size:(i + 1) * batch_size],\n :, :, :]\n xdata.append(reshape(xs, (batch_size, prod(xs.shape) / batch_size)))\n ydata.append(data['labels'][permut[i * batch_size:(i + 1)\n * batch_size]])\n print \"---->\\n.....Done!\"\n return [np.reshape(np.asarray(xdata), (nbatches, batch_size, -1)),\n np.asarray(ydata)]", "def make_X(self, dataset):\n\t\tprint(\"Making X\", dataset)\n\t\tself.X[dataset] = []\n\t\tfor document in self.tokenized_documents[dataset]:\n\t\t\tarray = np.array([self.worddict.get(word, self.worddict[\"__oov__\"]) for word in document])\n\t\t\tself.X[dataset].append(array)" ]
[ "0.61714315", "0.60954803", "0.60674745", "0.6059908", "0.60306466", "0.59371", "0.5907216", "0.5905398", "0.57684493", "0.5712042", "0.5711107", "0.56662107", "0.56599396", "0.56588864", "0.56463087", "0.56463087", "0.5631652", "0.56258637", "0.56173337", "0.5593329", "0.55716115", "0.5561289", "0.5560932", "0.5546948", "0.55230534", "0.55104786", "0.55050397", "0.55048656", "0.54971784", "0.5492508", "0.5480044", "0.54714453", "0.5465421", "0.546343", "0.54553556", "0.545092", "0.5440241", "0.54381645", "0.5428587", "0.5427473", "0.5421378", "0.54036474", "0.53925145", "0.5389539", "0.5386184", "0.5385828", "0.53802454", "0.5379771", "0.5379597", "0.53568935", "0.5355288", "0.5353408", "0.5351376", "0.5350639", "0.5349312", "0.5347558", "0.53439236", "0.53421277", "0.5331383", "0.5331383", "0.53254646", "0.5320301", "0.5319041", "0.53095114", "0.5306735", "0.5305706", "0.52992004", "0.52953696", "0.52943265", "0.5293408", "0.52916306", "0.5291388", "0.5288562", "0.52872586", "0.5284137", "0.52839476", "0.5280144", "0.5278063", "0.52762455", "0.52717406", "0.5269932", "0.5268301", "0.5266419", "0.52516836", "0.5250776", "0.52467275", "0.5241858", "0.5238551", "0.5238551", "0.52326477", "0.52301466", "0.5222641", "0.521762", "0.521075", "0.5205307", "0.52049804", "0.51989347", "0.518791", "0.518646", "0.51851845", "0.518083" ]
0.0
-1
Returns the single qubit gate Sqrt(X)
def sqrtx(): return Operator([[(1.+1.j)/2,(1.-1.j)/2],[(1.-1.j)/2,(1.+1.j)/2]])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sq(self, x):\n\t\treturn x * x", "def qrst_tm(x):\n return 0.2228*x - 0.6685", "def sq(x):\n\n return x ** x", "def isqrt(n): # newton (from stackoverflow)\n if n < 0:\n print(f\"segur que vols fer l'arrel de {n}?\")\n n = -n\n x = n\n y = (x + 1) // 2\n while y < x:\n x = y\n y = (x + n // x) // 2\n return x", "def sqr(x):\n return x ** 2", "def sqr(x):\n return x * x", "def mySqrt(self, x: int) -> int:\n if x == 0:\n return 0\n d = 0.1\n y = x / 2\n z = (y + x/y) / 2\n e = abs(z-y)\n while e > d:\n y = z\n z = (y + x/y) / 2\n e = abs(z - y)\n return int(z)", "def isqrt(n):\n if n < 0:\n raise ValueError('square root not defined for negative numbers')\n elif n <= MAX_EXACT:\n # For speed, we use floating point maths.\n return int(n**0.5)\n return _isqrt(n)", "def isqrt( a, b ):\n return a*a - b", "def Sqrt(a):\n c = Div(Add(a, 1), 2)\n b = a\n while(c < b):\n b = c\n c = Div(Add(Div(a, c), c), 2)\n return c", "def sqrt(x):\n return 0.0", "def _rsq(self):\n return self._ss_reg / self._ss_tot", "def sqrtdenest (expr):\n expr = sympify(expr)\n if expr.is_Pow and expr.exp is S.Half: #If expr is a square root\n return denester([expr])[0]\n return expr", "def sqsigned(x):\n return tf.sign(x) * (x ** 2)", "def SqrtSwap():\n\n return Operator(np.array([[[[ 1.0, 0.0],\n [ 0.0, 0.5 * (1 + 1j)]],\n [[ 0.0, 0.0],\n [ 0.5 * (1 - 1j), 0.0]]],\n [[[ 0.0, 0.5 * (1 - 1j)],\n [ 0.0, 0.0]],\n [[ 0.5 * (1 + 1j), 0.0],\n [ 0.0, 1.0]]]]))", "def isqrt(inputnum):\n if inputnum >= 0:\n return math.sqrt(inputnum)\n else:\n return complex(0, math.sqrt(-inputnum))", "def my_sqrt(x):\n square_root = x**(0.5)\n return square_root", "def isqrt(n):\r\n x = n\r\n y = (x + 1) // 2\r\n while y < x:\r\n x = y\r\n y = (x + n // x) // 2\r\n return x", "def sqrt(self):\n\n\t\t# Maintain state of self and create new trace variable new_var\n\t\tnew_var = Var(self.val, self.der)\n\t\treturn new_var.__pow__(0.5)", "def calc_q_square(self):\n return self._q_x()**2 + self._q_z()**2", "def sqrty():\n return Operator([[(1.+1.j)/2,(-1-1.j)/2],[(1.+1.j)/2,(1.+1.j)/2]])", "def norm_sqr(x):\n return inner_prod(x, x)[0]", "def sqrt_shifted_rate(self, t, i):\n return np.real(self._sqrt_shifted_rates[i](t))", "def rhs(x, t):\n\n return - np.sin(np.pi * x) * (np.sin(t) - 1 * np.pi ** 2 * np.cos(t))", "def symsqrt_v2(A, func='symeig'):\n if func == 'symeig':\n s, v = A.symeig(eigenvectors=True) # This is faster in GPU than CPU, fails gradcheck. See https://github.com/pytorch/pytorch/issues/30578\n elif func == 'svd':\n _, s, v = A.svd() # But this passes torch.autograd.gradcheck()\n else:\n raise ValueError()\n\n above_cutoff = s > s.max() * s.size(-1) * torch.finfo(s.dtype).eps\n\n ### This doesn't work for batched version\n\n ### This does but fails gradcheck because of inpalce\n\n ### This seems to be equivalent to above, work for batch, and pass inplace. 
CHECK!!!!\n s = torch.where(above_cutoff, s, torch.zeros_like(s))\n\n sol =torch.matmul(torch.matmul(v,torch.diag_embed(s.sqrt(),dim1=-2,dim2=-1)),v.transpose(-2,-1))\n\n return sol", "def sqsignit(fun):\n\n def wrapper(self, x, *arg, **kwarg):\n return sqrtsigned(fun(self, sqsigned(x), *arg, **kwarg))\n\n return wrapper", "def std(x, ddof=0):\n with mp.extraprec(16):\n return mp.sqrt(var(x, ddof))", "def _square_rooted(x):\n return sqrt(sum([(a * a) for a in x]))", "def rsqrt(data):\n return _make.rsqrt(data)", "def square(x):\n\n\treturn x * x", "def qr(T):\n Q, R = splinalg.qr(T, mode='economic')\n sR = np.sign(np.real(np.diag(R)))\n sR[sR == 0] = 1\n Q, R = Q * sR, sR.reshape([-1, 1]) * R\n # maxQ, minQ = Q.max(0), Q.min(0)\n # maxR, minR = R.max(1), R.min(1)\n # ind = (np.abs(minQ) > maxQ) & (np.abs(minR) > maxR)\n # Q[:, ind] *= -1\n # R[ind] *= -1\n return Q, R", "def squared(x=2):\n return x ** 2", "def square(x: float) -> float:\n return x * x", "def sqrt(x):\n # lets check that x is positive\n if x < 0:\n print(\"Error: negative value was supplied\")\n return -1\n\n\n # Initial guess for the square root \n z = x / 2.0 \n \n # Continuously improve the guess.\n while abs(x - (z*z)) > 0.01: \n z = z - (((z*z) - x) / (2*z))\n \n return z", "def sqrt(x):\r\n # see decorator for function body\r", "def sqrtsigned(x):\n return tf.sign(x) * tf.sqrt(tf.abs(x))", "def std(x):\n return sqrt(TinyStatistician.var(x))", "def qfunc(x):\n # Error check inputs\n if isinstance(x, np.ndarray):\n if x.dtype == np.complex128:\n raise TypeError(\"complex input not supported\")\n else:\n if isinstance(x, complex):\n raise TypeError(\"complex input not supported\")\n\n Q = 0.5 * erfc(x / np.sqrt(2.0))\n return Q", "def qtt_sym(lmax,rlmin,rlmax,fC,Tlm,nside=None):\n if nside is None: nside= lmax\n return libcurvedsky.rec_tau.qtt_sym(lmax,rlmin,rlmax,fC,Tlm,nside)", "def my_square(x):\n return x ** 2", "def T(self, q = np.zeros(1) , dq = np.zeros(1) , ddq = np.zeros(1) , R = 1 ): \n \n F = self.F( q , dq , ddq )\n \n Tl = self.Tlosses( dq , ddq )\n \n T = np.dot( 1. 
/ R , F ) + np.dot( R , Tl ) \n \n return T", "def _blr_tsqr(obj):\n nb = obj.nb[0]\n A = obj\n Q = core.BlockLowRank(numpy.full((nb, 1), None))\n B = numpy.full(nb, None)\n\n for i in range(nb):\n if isinstance(A[i, 0], core.LowRank):\n Qi, Ri = qr(A[i, 0].U)\n Q[i, 0] = Qi\n B[i] = Ri * A[i, 0].V\n else:\n B[i] = A[i, 0]\n\n B = numpy.vstack(B)\n\n if B.shape[0] < B.shape[1]:\n Z = numpy.zeros((B.shape[1] - B.shape[0], B.shape[1]))\n B = numpy.vstack([B, Z])\n\n Qb, R = qr(B)\n rstart, rend = 0, 0\n\n for i in range(nb):\n if isinstance(A[i, 0], core.LowRank):\n rstart = rend\n rend = rend + A[i, 0].rank\n U = Q[i, 0]\n V = Qb[rstart:rend, :]\n Q[i, 0] = core.LowRank((U, V), A[i, 0].method, A[i, 0].eps)\n else:\n rstart = rend\n rend = rend + A[i, 0].shape[0]\n Q[i, 0] = Qb[rstart:rend, :]\n\n return Q, R", "def xut2q( self, x , u , t ):\n \n # default is q = x\n \n return x", "def Sqr(num):\n return math.sqrt(float(num))", "def q(self) -> float:\n return self._pwr.imag", "def rsq(self):\n return np.squeeze(self._rsq)", "def square(x):\n return x**2", "def get_sqrt_2():\n return 1.41421356", "def _q_x(self):\n lambda_r = self.latt_par['lambda_r'].value \n return 2*np.pi*self.k/lambda_r", "def sqrt(a):", "def r(o, t):\n return o*t**0.5", "def circuit_one_qubit_one_param_rx_ry(inpt):\n qml.RX(inpt[0], wires=0)\n qml.RY(inpt[0], wires=0)\n return qml.expval(qml.PauliZ(0))", "def findQ(H_s):\n nl,dl = symToTransferFn(H_s)\n syst = sp.lti(nl,dl)\n p1,p2 = syst.poles[0], syst.poles[1]\n return np.sqrt(abs(p1*p2))/abs(p1+p2)", "def get_bprop_rsqrt(self):\n\n def bprop(x, out, dout):\n grad = F.fill(F.dtype(x), F.shape(x), -0.5) / (F.sqrt(x)*x)\n dx = dout * grad\n return (dx,)\n return bprop", "def sqrt(n):\n pass", "def square( x ):\n return x * x", "def square_func(i, T, amp, p = 10000):\n if (i//p)%2 == 0:\n return T + amp\n else:\n return T - amp", "def quartic_potential(x):\n k1=1\n k2=10\n return (k1*x**4)-(k2*x**2)", "def qgset(x):\n return 0.2855*x - 0.8565", "def stirling(n):\n return n**n*isqrt(2*math.pi*n)/math.e**n", "def my_square(y):\n\treturn (y ** 2)", "def srwf(xi):\n\treturn np.sqrt(wienergain(xi)) # SRWF gain function.", "def square(x):\n return x*x", "def SqrtNot(d=1):\n\n return Operator(0.5 * np.array([[1 + 1j, 1 - 1j],\n [1 - 1j, 1 + 1j]])).tensor_power(d)", "def sqrt(x):\n if x < 0:\n raise ValueError(f\"Cannot compute sqrt of negative number {x}\")\n guess = x\n i = 0\n while guess * guess !=x and i < 20:\n guess = (guess + x / guess) / 2.0\n i += 1\n return guess", "def sqrt(tensor):\n raise NotImplementedError", "def symsqrt_v1(A, func='symeig'):\n ## https://github.com/pytorch/pytorch/issues/25481#issuecomment-576493693\n ## perform the decomposition\n ## Recall that for Sym Real matrices, SVD, EVD coincide, |λ_i| = σ_i, so\n ## for PSD matrices, these are equal and coincide, so we can use either.\n if func == 'symeig':\n s, v = A.symeig(eigenvectors=True) # This is faster in GPU than CPU, fails gradcheck. 
See https://github.com/pytorch/pytorch/issues/30578\n elif func == 'svd':\n _, s, v = A.svd() # But this passes torch.autograd.gradcheck()\n else:\n raise ValueError()\n\n ## truncate small components\n good = s > s.max(-1, True).values * s.size(-1) * torch.finfo(s.dtype).eps\n components = good.sum(-1)\n common = components.max()\n unbalanced = common != components.min()\n if common < s.size(-1):\n s = s[..., :common]\n v = v[..., :common]\n if unbalanced:\n good = good[..., :common]\n if unbalanced:\n s = s.where(good, torch.zeros((), device=s.device, dtype=s.dtype))\n return (v * s.sqrt().unsqueeze(-2)) @ v.transpose(-2, -1)", "def my_square(y):\n\treturn (y **2)", "def R_square(self,parameterValues=None):\n sst = self.SST(parameterValues)\n cost = self.Cost(parameterValues)\n return 1.- cost/sst", "def sqrt(x: int):\n pass", "def __R1(x: float) -> np.float64:\n if np.abs(x) < end:\n return np.float64((1 / np.pi) * np.sqrt(2 * N - x * x))\n return np.float64(0.0)", "def square(x):\n return x * x", "def std(self):\n\t\treturn np.sqrt(0.6) #obtained by integrating 1.5x^4 from -1 to 1", "def q(self):\n return self._x", "def _do_sqrt(x, prec=None, extend=True, all=False):\n if prec:\n if x >= 0:\n return RealField(prec)(x).sqrt(all=all)\n else:\n return ComplexField(prec)(x).sqrt(all=all)\n if x == -1:\n from sage.symbolic.pynac import I\n z = I\n else:\n z = SR(x) ** one_half\n\n if all:\n if z:\n return [z, -z]\n else:\n return [z]\n return z", "def eval(self, x):\n self.__check_input__(x)\n x1 = x[0]\n x2 = x[1]\n\n t = 1 / (8 * np.pi)\n s = 10\n r = 6\n c = 5 / np.pi\n b = 5.1 / (4 * np.pi ** 2)\n a = 1\n\n term1 = a * (x2 - b * x1 ** 2 + c * x1 - r) ** 2\n term2 = s * (1 - t) * np.cos(x1)\n\n return term1 + term2 + s", "def exactsolution(x, t, u):\n if 0 <= (x - u*t) and (x - u*t) <= 0.2:\n temp = 1 - (10 * (x - u*t) -1)**2\n else:\n temp = 0\n return temp", "def _eta_sfr_scaling(self,x,q):\n i = self.enum[q]\n A = self.scaling_params['A'][i]\n b = self.scaling_params['b'][i]\n return A*x**b", "def sqr(a):\n return a * a", "def recur_se(x, rate):\n sigma2 = 1.\n return sigma2 * (1. - 1. 
/ np.sqrt(1 + rate * x))", "def signed_sqrt(x):\n return keras.backend.sign(x) * keras.backend.sqrt(keras.backend.abs(x) + 1e-9)", "def globalized_sqp(self, x_0, dual_x_0):\n raise Exception(\"Not implemented in {0}\".format(self))", "def sqrt(x):\n ans=0\n if x>=0:\n while ans*ans<x: ans=ans+1\n if ans*ans != x:\n print(x, \" is not a perfect square!\")\n return None\n #Note, it returns ans, but it does not print it!!\n else: return ans\n else:\n print(x, \" is a negative number!\")\n return None", "def sqr(f):\n return f.per(dmp_sqr(f.rep, f.lev, f.dom))", "def dbl(x):\n return 2 * x", "def basis(A):\n if A.is_cuda:\n # torch.orgqr is not available in CUDA\n Q, _ = torch.qr(A, some=True)\n else:\n Q = torch.orgqr(*torch.geqrf(A))\n return Q", "def s(self) -> NumType:\n return abs(self._pwr)", "def sqrt(obj):\n\tif isinstance(obj, Variable):\n \t\tnew_Variable = Variable(obj.val, obj.der)\n \t\treturn new_Variable.__pow__(0.5)\n\telse:\n\t\treturn np.sqrt(obj)", "def __call__ (self, x) :\n fx = fixed_q_exp (x)\n result = fx (self)\n return result", "def R_square(self,parameterValues):\n sst = self.SST(parameterValues)\n cost = self.Cost(parameterValues)\n return 1.- cost/sst", "def S(self, qubit_expr):\n self.apply_gate_operation(cirq.ops.S, qubit_expr)", "def rsr(self) -> float:\n return float(self.rmse() / np.std(self.true))", "def my_square(y):\n\treturn(y ** 2)", "def integer_sqrt(x: int):\n\n assert x > 0\n\n _1_40 = 1 << 40 # 2**40\n\n if x < _1_40:\n return int(sqrt(x)) # use math's sqrt() for small parameters\n\n n = int(x)\n\n if n <= 1:\n return n # handle sqrt(0)==0, sqrt(1)==1\n\n # Make a high initial estimate of the result (a little lower is slower!!!)\n r = 1 << ((n.bit_length() + 1) >> 1)\n\n while True:\n\n newr = (r + n // r) >> 1 # next estimate by Newton-Raphson\n if newr >= r:\n return r\n r = newr", "def sqrt(x):\n def good_enough(guess):\n precision = 0.001\n f = abs(guess ** 2 - x)\n return (f < precision)\n \n def improve(guess):\n return (guess + x/guess) / 2.0\n \n counter = 1\n guess = 1\n while not good_enough(guess) and counter <= 100:\n guess = improve(guess)\n counter += 1\n assert counter <= 100,'100 iterations done and no good answer' \n return int(guess)", "def rms(x):\n rms = np.sqrt(np.nansum(np.square(x))/np.float(np.sum(~np.isnan(x))))\n return rms", "def double(x):\n return x*2", "def f(x):\n \"\"\" Xrhsimopoihste MONO ekfraseis klhshs,\n p.x., stis add, mul, pow, sqrt, truediv, ...\n OXI infix telestes (+, /, ...) \"\"\"\n\n return round(truediv(1,add(add(2,truediv(3,add(x,4))),truediv(1,x))),4)", "def rayleigh_quotient(x, A):\n\n R = (x.conj().T.dot(A).dot(x))/(x.conj().T.dot(x))\n\n return R", "def g(self, x):\n return x * (1 - x)" ]
[ "0.6638449", "0.66072994", "0.65911305", "0.63476807", "0.6344686", "0.62796086", "0.6270373", "0.62620115", "0.62110555", "0.614344", "0.61330664", "0.61031693", "0.6101553", "0.6096317", "0.608822", "0.6087951", "0.6056629", "0.60234123", "0.59946364", "0.5992759", "0.5953574", "0.5905764", "0.58225095", "0.5811624", "0.579687", "0.57758546", "0.5700306", "0.5674259", "0.5668967", "0.5641139", "0.563238", "0.5615597", "0.56094825", "0.5599056", "0.55939996", "0.55939513", "0.5590112", "0.5587385", "0.55842936", "0.5583403", "0.55798084", "0.5572664", "0.557091", "0.55699843", "0.5567256", "0.55584764", "0.55579054", "0.55553", "0.5539105", "0.5501705", "0.5496696", "0.5483385", "0.54823726", "0.54788125", "0.5459188", "0.54547393", "0.54488873", "0.5438961", "0.54387033", "0.5436718", "0.5436203", "0.54227746", "0.54167217", "0.54099864", "0.54091245", "0.5407864", "0.5407283", "0.53923124", "0.5388673", "0.5384534", "0.538276", "0.5379182", "0.5377567", "0.5367795", "0.5356936", "0.5347158", "0.53451157", "0.53441286", "0.5339304", "0.53388906", "0.53373915", "0.53357846", "0.5324721", "0.5321804", "0.5318295", "0.5317413", "0.5308669", "0.52883", "0.5287648", "0.5282769", "0.5281982", "0.5279297", "0.5275642", "0.52633435", "0.5261103", "0.52577627", "0.52564526", "0.52514064", "0.5247274", "0.52277046" ]
0.6088627
14
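A quick, hedged sanity check on the sqrtx() record above: the library behind Operator is not identified in the record, so this minimal numpy sketch works on the raw matrix only and verifies that it squares to Pauli-X, as the query states.

import numpy as np

# Matrix copied verbatim from the sqrtx() document above.
SQRT_X = np.array([[(1. + 1.j) / 2, (1. - 1.j) / 2],
                   [(1. - 1.j) / 2, (1. + 1.j) / 2]])
PAULI_X = np.array([[0., 1.], [1., 0.]], dtype=complex)

# Sqrt(X) applied twice should reproduce the X gate (up to floating-point error).
assert np.allclose(SQRT_X @ SQRT_X, PAULI_X)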
Returns the single qubit gate Sqrt(Y)
def sqrty(): return Operator([[(1.+1.j)/2,(-1-1.j)/2],[(1.+1.j)/2,(1.+1.j)/2]])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def qrst_tm(x):\n return 0.2228*x - 0.6685", "def Y(t, p, q):\n \n if t <= 0:\n return float('inf')\n \n if q == 1:\n return (t**(p+1) - 1) / (p * (p+1)) - np.log(t) / q + (p - 1) / p * (t-1)\n else:\n return (t**(p+1) - 1) / (p * (p+1)) + (t**(1-q) - 1) / (q*(q-1)) + (p - q) / (p * q) * (t-1)", "def my_square(y):\n\treturn (y ** 2)", "def my_square(y):\n\treturn (y **2)", "def sqrtx():\n return Operator([[(1.+1.j)/2,(1.-1.j)/2],[(1.-1.j)/2,(1.+1.j)/2]])", "def my_square(y):\n\treturn(y ** 2)", "def qrst_tm_ao(y):\n return (y - (-0.6685))/0.2228", "def get_rsq_y(self):\n\n return np.matmul(self.beta_z.T, self.sigma_zw)", "def twiny(self):\n return self.altx()", "def y(self):\n return self.yn.func", "def _rsq(self):\n return self._ss_reg / self._ss_tot", "def Yp(t, p, q):\n \n return (t**p - 1) / p + (1-t**(-q)) / q", "def rhs(y,t):\n return math.cos(t)", "def rate(self, t, y):\n if y[1] >= self.parameters.T:\n return super(SFORT, self).rate(t, y)\n else:\n return 0", "def sq(self, x):\n\t\treturn x * x", "def Y(self, qubit_expr): \n self.apply_gate_operation('Y', qubit_expr)", "def calc_q_square(self):\n return self._q_x()**2 + self._q_z()**2", "def sq(x):\n\n return x ** x", "def sqrtdenest (expr):\n expr = sympify(expr)\n if expr.is_Pow and expr.exp is S.Half: #If expr is a square root\n return denester([expr])[0]\n return expr", "def rate(self, t, y):\n k = self._calc_k(y[1])\n # return k * (1 - y - self.parameters['y0'])\n dy = self.parameters.y0 - y[0]\n return [k * dy if dy > 1e-6 else 0]", "def rhs(x, t):\n\n return - np.sin(np.pi * x) * (np.sin(t) - 1 * np.pi ** 2 * np.cos(t))", "def rate(self, t, y):\n k1, k2 = self._k(y[-1])\n if y[1] > 1e-6:\n dydt = [(self.parameters.y1 * k1 + self.parameters.y2 * k2) * y[1],\n -(k1 + k2) * y[1]]\n else:\n dydt = [0, 0]\n return dydt", "def SqrtSwap():\n\n return Operator(np.array([[[[ 1.0, 0.0],\n [ 0.0, 0.5 * (1 + 1j)]],\n [[ 0.0, 0.0],\n [ 0.5 * (1 - 1j), 0.0]]],\n [[[ 0.0, 0.5 * (1 - 1j)],\n [ 0.0, 0.0]],\n [[ 0.5 * (1 + 1j), 0.0],\n [ 0.0, 1.0]]]]))", "def RHS(y,t):\r\n\r\n return np.multiply(A.dot(y),ones-y)-beta*y", "def sqrt(self):\n\n\t\t# Maintain state of self and create new trace variable new_var\n\t\tnew_var = Var(self.val, self.der)\n\t\treturn new_var.__pow__(0.5)", "def sqrt_shifted_rate(self, t, i):\n return np.real(self._sqrt_shifted_rates[i](t))", "def sqr(x):\n return x ** 2", "def T(self, q = np.zeros(1) , dq = np.zeros(1) , ddq = np.zeros(1) , R = 1 ): \n \n F = self.F( q , dq , ddq )\n \n Tl = self.Tlosses( dq , ddq )\n \n T = np.dot( 1. 
/ R , F ) + np.dot( R , Tl ) \n \n return T", "def qtt_sym(lmax,rlmin,rlmax,fC,Tlm,nside=None):\n if nside is None: nside= lmax\n return libcurvedsky.rec_tau.qtt_sym(lmax,rlmin,rlmax,fC,Tlm,nside)", "def y(self) -> int:", "def f(t,y):\n return (lam*y)", "def sqr(x):\n return x * x", "def evaluateY(self, time) -> float:\n ...", "def R_squared(self):\n return 1 - ((self.y - self.y_hat(self.x))**2).sum() / ((self.y - self.y.mean())**2).sum()", "def q(self) -> float:\n return self._pwr.imag", "def circuit_one_qubit_one_param_rx_ry(inpt):\n qml.RX(inpt[0], wires=0)\n qml.RY(inpt[0], wires=0)\n return qml.expval(qml.PauliZ(0))", "def rsq(self):\n return np.squeeze(self._rsq)", "def twinx(self):\n return self.alty()", "def isqrt( a, b ):\n return a*a - b", "def rate(self, t, yt):\n # TODO add with parameters\n T = yt[-1]\n y = yt[:-1]\n # self.__log.debug('Em %s', Em)\n dIdt = (self.parameters.A0 * np.exp(-self._Em / Rgas / T))\n # self.__log.debug('dkdt %s', dkdt)\n coeff1 = self.Wm * self.mt / sqrtpi\n coeff2 = np.exp(-pow(\n (self._Em - self.parameters.E0) / self.parameters.sigma, 2) / 2)\n coeff3 = np.exp(-y[1:]) * dIdt\n # self.__log.debug('coeff: %s %s %s', coeff1, coeff2, coeff3)\n # dydt = (self.parameters['y0'] - y[0]) * \\\n # np.sum(coeff1 + coeff2 + coeff3)\n dydt = self.parameters.y0 * np.sum(coeff1 * coeff2 * coeff3)\n # self.__log.debug('dydt %s', dydt)\n return np.append(dydt, dIdt)", "def get_S_r(self):\n\n S_r = np.sum((self.eta_model - self.eta_exp) ** 2.)\n\n return S_r", "def isqrt(n): # newton (from stackoverflow)\n if n < 0:\n print(f\"segur que vols fer l'arrel de {n}?\")\n n = -n\n x = n\n y = (x + 1) // 2\n while y < x:\n x = y\n y = (x + n // x) // 2\n return x", "def y(x):\n x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]\n return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4", "def findQ(H_s):\n nl,dl = symToTransferFn(H_s)\n syst = sp.lti(nl,dl)\n p1,p2 = syst.poles[0], syst.poles[1]\n return np.sqrt(abs(p1*p2))/abs(p1+p2)", "def sqsigned(x):\n return tf.sign(x) * (x ** 2)", "def Ry(q):\n sin_q, cos_q = sin(q), cos(q)\n return numpy.matrix([\n [cos_q, 0, -sin_q, 0],\n [0, 1, 0, 0],\n [sin_q, 0, cos_q, 0],\n [0, 0, 0, 1],\n ])", "def y(self) -> float:\n return self.A[2] if self.scalar_vector else self.A[1]", "def circuit_one_qubit_one_param_h_ry(inpt):\n qml.Hadamard(wires=0)\n qml.RY(inpt[0], wires=0)\n return qml.expval(qml.PauliZ(0))", "def yule_q(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = a + b + c + d\n\n if n == 0:\n return np.nan\n elif p1 == n:\n # c and d are zero\n return _div(a - b, p1)\n elif p2 == n:\n # b and d are zero\n return _div(a - c, p2)\n elif q1 == n:\n # a and b are zero\n return _div(d - c, q1)\n elif q2 == n:\n # a and c are zero\n return _div(d - b, q2)\n\n return _div(self.covar(), a * d + b * c)", "def y(self):\r\n return self.unif[1]", "def P_Tn(self,\n yn:float,\n n:int) -> float:\n return 1 / ((1 + yn * 0.5) **n)", "def toy_hamiltonian(x):\n q,p = extract_q_p(x)\n pSqr = tf.square(p)\n return 1/2 * tf.square(q - 1/4 * pSqr) + 1/32 * pSqr", "def yule_y(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = a + b + c + d\n\n if n == 0:\n return np.nan\n elif p1 == n:\n # c and d are zero\n return _div(sqrt(a) - sqrt(b), sqrt(a) + sqrt(b))\n elif p2 == n:\n # b and d are zero\n return _div(sqrt(a) - sqrt(c), sqrt(a) + sqrt(c))\n elif q1 == n:\n # a and b are zero\n return _div(sqrt(d) - sqrt(c), sqrt(d) + sqrt(c))\n elif q2 == n:\n # a and 
c are zero\n return _div(sqrt(d) - sqrt(b), sqrt(d) + sqrt(b))\n\n ad = a * d\n bc = b * c\n\n return _div(sqrt(ad) - sqrt(bc), sqrt(ad) + sqrt(bc))", "def __call__(self, t, y):\n # State vector is [T, Y_1, Y_2, ... Y_K]\n self.gas.set_unnormalized_mass_fractions(y[1:])\n self.gas.TP = y[0], self.P\n rho = self.gas.density\n\n wdot = self.gas.net_production_rates\n dYdt = (self.yin-self.gas.Y)/self.tres + wdot*self.gas.molecular_weights/rho\n dTdt = (self.tin-self.gas.T)/self.tres + \\\n self.Q*(self.ta-self.gas.T)/(rho*self.gas.cp) - \\\n (np.dot(self.gas.partial_molar_enthalpies,wdot)/(rho*self.gas.cp))\n return np.hstack((dTdt, dYdt))", "def R_squared(y_true, y_pred):\n SSE = K.sum(K.square(y_true - y_pred))\n TSS = K.sum(K.square(y_true - K.mean(y_true)))\n return 1-SSE/(TSS+K.epsilon())", "def runge_integrator(self, t, y, dt, tau):\n\n k1 = self.plant.rhs(t, y, tau)\n k2 = self.plant.rhs(t + 0.5 * dt, y + 0.5 * dt * k1, tau)\n k3 = self.plant.rhs(t + 0.5 * dt, y + 0.5 * dt * k2, tau)\n k4 = self.plant.rhs(t + dt, y + dt * k3, tau)\n return (k1 + 2 * (k2 + k3) + k4) / 6.0", "def rsr(self) -> float:\n return float(self.rmse() / np.std(self.true))", "def W(self):\n if not self.isVaild():\n pass\n return self.Wq() + 1.0/self.muy", "def c(self, y, t):\n return 1 / 2 * (np.sum((y - t) ** 2) ** 0.5) ** 2", "def r(o, t):\n return o*t**0.5", "def Y(phi = None):\n if phi == None:\n return sy\n else:\n return scipy.linalg.expm(-1j * phi / 2 * sy)", "def rhs(t, Y, q, omega_d, b):\n f = np.zeros_like(Y)\n\n f[0] = Y[1]\n f[1] = -q*Y[1] - np.sin(Y[0]) + b*np.cos(omega_d*t)\n\n return f", "def get_ytm_function(self,x: float):\n ytm = Bootstrapping.get_ytm_dict(self)\n if x<1:\n return ytm[1]\n if x>=1 and x<=4:\n return ytm[1]+((ytm[4]-ytm[1])/(4-1))*(x-1)\n elif x>4 and x<=6:\n return ytm[4]+((ytm[6]-ytm[4])/(6-4))*(x-4)\n elif x>6 and x<=8:\n return ytm[6]+((ytm[8]-ytm[6])/(8-6))*(x-6)\n elif x>8 and x<=10:\n return ytm[8]+((ytm[10]-ytm[8])/(10-8))*(x-8)\n elif x>10 and x<=14:\n return ytm[10]+((ytm[14]-ytm[10])/(14-10))*(x-10)\n elif x>14 and x<=20:\n return ytm[14]+((ytm[20]-ytm[14])/(20-14))*(x-14)\n elif x>20 and x<=40:\n return ytm[20]+((ytm[40]-ytm[20])/(40-20))*(x-20)\n elif x>40 and x<=60:\n return ytm[40]+((ytm[60]-ytm[40])/(60-40))*(x-40)", "def value(self): \r\n c = self.nd1() * self.s * math.exp(-self.div * self.t)\r\n c -= self.nd2() * self.x * math.exp(-self.rf * self.t)\r\n \r\n return c", "def _blr_tsqr(obj):\n nb = obj.nb[0]\n A = obj\n Q = core.BlockLowRank(numpy.full((nb, 1), None))\n B = numpy.full(nb, None)\n\n for i in range(nb):\n if isinstance(A[i, 0], core.LowRank):\n Qi, Ri = qr(A[i, 0].U)\n Q[i, 0] = Qi\n B[i] = Ri * A[i, 0].V\n else:\n B[i] = A[i, 0]\n\n B = numpy.vstack(B)\n\n if B.shape[0] < B.shape[1]:\n Z = numpy.zeros((B.shape[1] - B.shape[0], B.shape[1]))\n B = numpy.vstack([B, Z])\n\n Qb, R = qr(B)\n rstart, rend = 0, 0\n\n for i in range(nb):\n if isinstance(A[i, 0], core.LowRank):\n rstart = rend\n rend = rend + A[i, 0].rank\n U = Q[i, 0]\n V = Qb[rstart:rend, :]\n Q[i, 0] = core.LowRank((U, V), A[i, 0].method, A[i, 0].eps)\n else:\n rstart = rend\n rend = rend + A[i, 0].shape[0]\n Q[i, 0] = Qb[rstart:rend, :]\n\n return Q, R", "def isqrt(n):\n if n < 0:\n raise ValueError('square root not defined for negative numbers')\n elif n <= MAX_EXACT:\n # For speed, we use floating point maths.\n return int(n**0.5)\n return _isqrt(n)", "def ytrue(t):\n return np.array([np.sin(t) + np.cos(t)])", "def taucurveder(self, p, x):\n y = -(p[1] * numpy.exp((p[2] + x) / p[3]) / 
p[3] - p[4] * numpy.exp(-(p[5] + x) / p[6]) / p[6]) / (\n p[1] * numpy.exp((p[2] + x) / p[3]) +\n p[4] * numpy.exp(-(p[5] + x) / p[6])) ** 2.0\n # print 'dy: ', y\n return y", "def get_bprop_rsqrt(self):\n\n def bprop(x, out, dout):\n grad = F.fill(F.dtype(x), F.shape(x), -0.5) / (F.sqrt(x)*x)\n dx = dout * grad\n return (dx,)\n return bprop", "def single_qubit_ansatz(theta: float, phi: float) -> circuit.qc:\n\n qc = circuit.qc('single-qubit ansatz Y')\n qc.qubit(1.0)\n qc.rx(0, theta)\n qc.ry(0, phi)\n return qc", "def generateRHS(T, sigma, qdx):\n\n b = T[1:-1]*1./sigma\n # Consider Dirichlet BC\n b[0] += T[0]\n # Consider Neumann BC\n b[-1] += qdx\n\n return b", "def isqrt(inputnum):\n if inputnum >= 0:\n return math.sqrt(inputnum)\n else:\n return complex(0, math.sqrt(-inputnum))", "def q(self):\n return self._x", "def y(x,xi):\n return np.exp(-xi)-np.exp(-xi)*(x-xi)", "def ytrue(t):\n return np.array([np.exp(lam*t)])", "def SqrtNot(d=1):\n\n return Operator(0.5 * np.array([[1 + 1j, 1 - 1j],\n [1 - 1j, 1 + 1j]])).tensor_power(d)", "def theta_given_s(theta, q):\n if q == 0:\n return .3333\n else:\n if theta == 0:\n return 0.25\n elif theta == 1:\n return 0.25\n else:\n return 0.5", "def estimateCt(y, inp):\n\treturn getK2(inp) * (1 - math.exp(-getLambda(inp) * y / getY90(inp)))", "def Get_RawOutY_Value(self):\r\n l = self.__readFromRegister(self.__REG_R_OUT_Y_L, 0xff)\r\n h_u2 = self.__readFromRegister(self.__REG_R_OUT_Y_H, 0xff)\r\n h = bitOps.TwosComplementToByte(h_u2)\r\n if (h < 0):\r\n return (h*256 - l) * self.gain\r\n elif (h >= 0):\r\n return (h*256 + l) * self.gain", "def srwf(xi):\n\treturn np.sqrt(wienergain(xi)) # SRWF gain function.", "def Tlosses(self, dq = np.zeros(1) , ddq = np.zeros(1)): \n \n T = np.dot( self.Ia , ddq ) + np.dot( self.Da , dq )\n \n return T", "def get_sqrt_2():\n return 1.41421356", "def y(self):\n return self[1]", "def qtf(self, vw, th, gp, psi_l, lai, dt):\n\t\t#if the amount of water in tank is less than amount that will be absorbed by plant in timestep dt, then what's left will be absorbed \n\t qtt = th - self.qwf(vw, th, gp, psi_l, lai, dt)\n\t if self.tx*self.ZT*10**6 <= 0:\n\t return 0.\n\t elif self.tx*self.ZT*10**6 <= qtt*dt:\n\t return (self.tx*self.ZT*10**6/dt)\n\t else:\n\t return qtt", "def recall_ex_single(yt, yp):\n tp, t = 0, 0\n for y0, y1 in zip(yt, yp):\n if y0 == 1:\n t += 1\n tp += 1 if y1 == 1 else 0\n\n return float(tp) / float(t) if t != 0 else 0.0", "def isqrt(n):\r\n x = n\r\n y = (x + 1) // 2\r\n while y < x:\r\n x = y\r\n y = (x + n // x) // 2\r\n return x", "def t(o, r):\n return (r/o)**2", "def r2(t, y):\n\treturn r2_score(t, y)", "def rsq(x,y):\n return correlation(x,y)**2", "def y(self):\n return (self.__y)", "def ret_f(t,y):\n\n f = np.zeros(3)\n f[0] = 77.27*(y(1) - y(0)*y(1)+ y(0)-8.375e-6*y(0)*y(0))\n f[1] = (1.0/77.27)*(-y(1)-y(0)*y(1)+y(2))\n f[2] = 0.161*(y(0)-y(2))\n\n return f", "def RMSE(y,yhat):\r\n return(np.sqrt(MSE(y,yhat)))", "def squared(x=2):\n return x ** 2", "def snRate(self, z):\n res = self.alpha * (1.0 + z)**self.beta \n res *= ((self.cosmo.h / 0.7) **3.) 
\n return res", "def t(self: Q) -> np.array:\n\n return np.array([self.t])", "def Sqrt(a):\n c = Div(Add(a, 1), 2)\n b = a\n while(c < b):\n b = c\n c = Div(Add(Div(a, c), c), 2)\n return c", "def sys(self, t, x, u):\n xk = fft(x)\n\n # 4/3 truncation rule\n # dealiasing due to triple nonlinearity\n # note: you could do zero-padding to improve memory\n # efficiency\n xk[self.n_states // 4 : 3 * self.n_states // 4] = 0j\n x = ifft(xk)\n\n yk = (-self.k**2 * xk.ravel() / 2) * 1j\n y = ifft(yk) + 1j * abs(x) ** 2 * x + u\n return y", "def r8_y1x(t):\n y1x = 20.0 / (1.0 + 19.0 * np.exp(- 0.25 * t))\n return(y1x)", "def _q_x(self):\n lambda_r = self.latt_par['lambda_r'].value \n return 2*np.pi*self.k/lambda_r", "def y(self):\n #print(xdb.xray_lines(self.element)[self.line].energy/1000)\n #self.output_params={}\n #print(self.getBulkCon(self.element,self.botchem,self.botden))\n x = self.x + self.qoff\n if not self.__fit__:\n self.output_params['scaler_parameters']={}\n return self.fluCalFun(x)\n #return self.x" ]
[ "0.6507779", "0.62822163", "0.62746733", "0.6241614", "0.61534065", "0.61482567", "0.61480504", "0.6128632", "0.6057746", "0.6013973", "0.59892964", "0.5979296", "0.5942681", "0.592979", "0.59215605", "0.59135765", "0.5904457", "0.59001505", "0.58625853", "0.58580345", "0.5816371", "0.5792469", "0.57709336", "0.5749432", "0.57242894", "0.57151437", "0.57004875", "0.56880474", "0.56829816", "0.5635147", "0.56156933", "0.5613603", "0.5611805", "0.5601607", "0.5591079", "0.559095", "0.5579557", "0.5577242", "0.55708545", "0.5568354", "0.5521519", "0.5516461", "0.550872", "0.55032384", "0.54942286", "0.5490989", "0.54878074", "0.5465194", "0.54573303", "0.54538786", "0.5446378", "0.5442216", "0.54396546", "0.54282975", "0.5422806", "0.54157716", "0.54103845", "0.5397074", "0.53945494", "0.53881204", "0.5385469", "0.53757596", "0.5370383", "0.5364883", "0.5364286", "0.5358897", "0.5357334", "0.53447646", "0.5337602", "0.5309385", "0.53005224", "0.5297559", "0.52960634", "0.52924585", "0.52920026", "0.5291981", "0.5291814", "0.52821696", "0.5277578", "0.5271292", "0.52698714", "0.5265727", "0.5263979", "0.52600884", "0.52582806", "0.5257904", "0.52573675", "0.52521926", "0.5245079", "0.52435446", "0.5242328", "0.52405226", "0.5238597", "0.52346784", "0.5233626", "0.52299803", "0.5225998", "0.5225142", "0.5224593", "0.52188385" ]
0.61223656
8
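The same style of check applies to the sqrty() record: a minimal numpy sketch, again on the raw matrix rather than any particular Operator class, confirming it squares to Pauli-Y.

import numpy as np

# Matrix copied verbatim from the sqrty() document above.
SQRT_Y = np.array([[(1. + 1.j) / 2, (-1. - 1.j) / 2],
                   [(1. + 1.j) / 2, (1. + 1.j) / 2]])
PAULI_Y = np.array([[0., -1.j], [1.j, 0.]])

assert np.allclose(SQRT_Y @ SQRT_Y, PAULI_Y)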
Returns the single qubit gate Sqrt(W)
def sqrtw(): return Operator([[(1.+1.j)/2,-1.j/np.sqrt(2)],[1./np.sqrt(2),(1.+1.j)/2]])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def qrst_tm(x):\n return 0.2228*x - 0.6685", "def W(self):\n if not self.isVaild():\n pass\n return self.Wq() + 1.0/self.muy", "def sq(x):\n\n return x ** x", "def calc_q_square(self):\n return self._q_x()**2 + self._q_z()**2", "def sqrty():\n return Operator([[(1.+1.j)/2,(-1-1.j)/2],[(1.+1.j)/2,(1.+1.j)/2]])", "def s(self) -> NumType:\n return abs(self._pwr)", "def sqrtx():\n return Operator([[(1.+1.j)/2,(1.-1.j)/2],[(1.-1.j)/2,(1.+1.j)/2]])", "def q(self) -> float:\n return self._pwr.imag", "def SqrtSwap():\n\n return Operator(np.array([[[[ 1.0, 0.0],\n [ 0.0, 0.5 * (1 + 1j)]],\n [[ 0.0, 0.0],\n [ 0.5 * (1 - 1j), 0.0]]],\n [[[ 0.0, 0.5 * (1 - 1j)],\n [ 0.0, 0.0]],\n [[ 0.5 * (1 + 1j), 0.0],\n [ 0.0, 1.0]]]]))", "def q_(w,R,lam=1064.0e-9):\n\n if R!=np.inf:\n q=np.pi*w**2*R/(np.pi*w**2-1j*R*lam)\n else:\n q=1j*np.pi*w**2/lam\n\n return q", "def _rsq(self):\n return self._ss_reg / self._ss_tot", "def sq(self, x):\n\t\treturn x * x", "def srwf(xi):\n\treturn np.sqrt(wienergain(xi)) # SRWF gain function.", "def findQ(H_s):\n nl,dl = symToTransferFn(H_s)\n syst = sp.lti(nl,dl)\n p1,p2 = syst.poles[0], syst.poles[1]\n return np.sqrt(abs(p1*p2))/abs(p1+p2)", "def wt(self):\n return self._wt", "def qtf(self, vw, th, gp, psi_l, lai, dt):\n\t\t#if the amount of water in tank is less than amount that will be absorbed by plant in timestep dt, then what's left will be absorbed \n\t qtt = th - self.qwf(vw, th, gp, psi_l, lai, dt)\n\t if self.tx*self.ZT*10**6 <= 0:\n\t return 0.\n\t elif self.tx*self.ZT*10**6 <= qtt*dt:\n\t return (self.tx*self.ZT*10**6/dt)\n\t else:\n\t return qtt", "def stirling(n):\n return n**n*isqrt(2*math.pi*n)/math.e**n", "def Wq(self):\n if not self.isVaild():\n pass\n return self.Lq()/self.lamda", "def w0(self, Ppump):\n\n EsatL, TR, tauL = self.EsatL, self.TR, self.tauL\n Pst, gst = self.steadystate(Ppump)\n r = Pst / EsatL\n\n w0 = np.sqrt(r * gst / TR +\n Pst * self.dqP_dEP(Pst * TR) * (1. 
/ tauL + r))\n return(w0)", "def sqsigned(x):\n return tf.sign(x) * (x ** 2)", "def _blr_tsqr(obj):\n nb = obj.nb[0]\n A = obj\n Q = core.BlockLowRank(numpy.full((nb, 1), None))\n B = numpy.full(nb, None)\n\n for i in range(nb):\n if isinstance(A[i, 0], core.LowRank):\n Qi, Ri = qr(A[i, 0].U)\n Q[i, 0] = Qi\n B[i] = Ri * A[i, 0].V\n else:\n B[i] = A[i, 0]\n\n B = numpy.vstack(B)\n\n if B.shape[0] < B.shape[1]:\n Z = numpy.zeros((B.shape[1] - B.shape[0], B.shape[1]))\n B = numpy.vstack([B, Z])\n\n Qb, R = qr(B)\n rstart, rend = 0, 0\n\n for i in range(nb):\n if isinstance(A[i, 0], core.LowRank):\n rstart = rend\n rend = rend + A[i, 0].rank\n U = Q[i, 0]\n V = Qb[rstart:rend, :]\n Q[i, 0] = core.LowRank((U, V), A[i, 0].method, A[i, 0].eps)\n else:\n rstart = rend\n rend = rend + A[i, 0].shape[0]\n Q[i, 0] = Qb[rstart:rend, :]\n\n return Q, R", "def sqrtdenest (expr):\n expr = sympify(expr)\n if expr.is_Pow and expr.exp is S.Half: #If expr is a square root\n return denester([expr])[0]\n return expr", "def sqrt_shifted_rate(self, t, i):\n return np.real(self._sqrt_shifted_rates[i](t))", "def square_func(i, T, amp, p = 10000):\n if (i//p)%2 == 0:\n return T + amp\n else:\n return T - amp", "def sqrt(self):\n\n\t\t# Maintain state of self and create new trace variable new_var\n\t\tnew_var = Var(self.val, self.der)\n\t\treturn new_var.__pow__(0.5)", "def sq_custom(f,T,a=0,b=0):\n fs=44100\n t=np.linspace(0,T,T*fs)\n A=np.floor(a*fs*T)\n D=np.floor(b*fs*T)\n S1=np.linspace(0,1,A)\n S2=np.ones(T*fs-A-D)\n S3=np.linspace(1,0,D)\n S0=signal.square(2 * np.pi * f * t)\n return(np.hstack((S1,S2,S3))*S0)", "def sqr(x):\n return x ** 2", "def soundspeed(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n g_tt = liq_g(2,0,temp,pres)\n g_tp = liq_g(1,1,temp,pres)\n g_pp = liq_g(0,2,temp,pres)\n csqinv = (g_tp**2/g_tt - g_pp) / g_p**2\n c = csqinv**(-.5)\n return c", "def my_square(d):asaasasassssssssssssssssssssssssss\n\t return (d ** 3)", "def q(self, s, a):\n # The Q value of the current state is based on the max Q value of the next state.\n next_state_max_q = max([self.qtable[s[0]+x][s[1]+y] for (x,y) in self.maze.moves()])\n self.qtable[s[0]+a[0]][s[1]+a[1]] = (self.qtable[s[0]+a[0]][s[1]+a[1]]\n + self.alpha * (self.r(s,a) + self.gamma * next_state_max_q\n - self.qtable[s[0]+a[0]][s[1]+a[1]]))\n\n return self.qtable[s[0]+a[0]][s[1]+a[1]]", "def circuit_one_qubit_one_param_rx_ry(inpt):\n qml.RX(inpt[0], wires=0)\n qml.RY(inpt[0], wires=0)\n return qml.expval(qml.PauliZ(0))", "def get_s( self ):\n\n # initialize scaling factor as unknown variable, assuming it's real and\n # greater than zero\n _s = Symbol( 's', real = True, positive = True )\n\n # solve for scaling factor (first argument is expression set equal to zero)\n s = solve( self.a * _s ** self.n + self.b * _s - 1, _s )\n\n # save result as float\n self.s = float( s[ 0 ] )", "def rsq(self):\n return np.squeeze(self._rsq)", "def WI_statewide_eqn(Qm, A, Qr, Q90):\n Bf = (Qm / A) * (Q90 / Qr)\n Qb = 0.907 * A**1.02 * Bf**0.52\n return Qb.copy(), Bf.copy()", "def refrac(w_p, w):\n\n return sqrt(1 - pow(w_p / w, 2))", "def Sqrt(a):\n c = Div(Add(a, 1), 2)\n b = a\n while(c < b):\n b = c\n c = Div(Add(Div(a, c), c), 2)\n return c", "def qtt_sym(lmax,rlmin,rlmax,fC,Tlm,nside=None):\n if nside is None: nside= lmax\n return libcurvedsky.rec_tau.qtt_sym(lmax,rlmin,rlmax,fC,Tlm,nside)", "def Q_i(params):\n Q = params['Q'].value\n Qe = Q_e(params)\n return (Q ** -1 - np.real(Qe ** -1)) ** -1", "def getQuadOp(self):\n return self.basis2grid(np.eye(self.nb), axis = 
0)", "def S(self, qubit_expr):\n self.apply_gate_operation(cirq.ops.S, qubit_expr)", "def isqrt( a, b ):\n return a*a - b", "def std(self):\n\t\treturn np.sqrt(0.6) #obtained by integrating 1.5x^4 from -1 to 1", "def isqrt(n): # newton (from stackoverflow)\n if n < 0:\n print(f\"segur que vols fer l'arrel de {n}?\")\n n = -n\n x = n\n y = (x + 1) // 2\n while y < x:\n x = y\n y = (x + n // x) // 2\n return x", "def sqr(x):\n return x * x", "def w(self) -> float:\n return self.A[0] if self.scalar_vector else self.A[3]", "def _magsqr(z):\n return np.abs(z) ** 2", "def q(self):\n return self._x", "def Qfun(Phieq,Phi,Phibar,taurad):\n #note Q is different from Perez-Becker and Showman by a factor of g (for consistency with Phi vs H)\n Q=(1/taurad)*(Phieq-(Phi+Phibar))\n\n return Q", "def my_square(y):\n\treturn (y ** 2)", "def yule_q(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = a + b + c + d\n\n if n == 0:\n return np.nan\n elif p1 == n:\n # c and d are zero\n return _div(a - b, p1)\n elif p2 == n:\n # b and d are zero\n return _div(a - c, p2)\n elif q1 == n:\n # a and b are zero\n return _div(d - c, q1)\n elif q2 == n:\n # a and c are zero\n return _div(d - b, q2)\n\n return _div(self.covar(), a * d + b * c)", "def circuit_one_qubit_one_param_h_ry(inpt):\n qml.Hadamard(wires=0)\n qml.RY(inpt[0], wires=0)\n return qml.expval(qml.PauliZ(0))", "def calc_q_rad(n0, IAM_b, I_direct, IAM_d, I_diffuse, tilt):\n q_rad_Wperm2 = n0 * IAM_b * I_direct + n0 * IAM_d * I_diffuse * (1 + cos(tilt)) / 2\n return q_rad_Wperm2", "def qwf(self, vw, ev, gp, psi_l, lai, dt):\n\t\t#if the amount of water in storage is less than amount that will be absorbed by plant in timestep dt, then what's left will be absorbed \n\t qw = (self.gwf(self.psi_wf(self.vw,self.d1, self.d1, self.ns, self.tl), self.H, self.J)*(self.psi_wf(self.vw, self.d1, self.d1, self.ns, self.tl) - (ev*(1. 
- self.F_CAP))/(lai*gp) - psi_l)*lai)\n\t if self.vw == 0:\n\t return 0.\n\t elif self.vw*10**6 <= qw*dt:\n\t return (self.vw*10**6/dt)\n\t else:\n\t return qw", "def get_S_r(self):\n\n S_r = np.sum((self.eta_model - self.eta_exp) ** 2.)\n\n return S_r", "def get_t_tconst(qrsidx):\n\n def _t_tconst(pattern, twave):\n \"\"\"\n Temporal constraints of the T Waves wrt the corresponding QRS complex.\n \"\"\"\n BASIC_TCONST(pattern, twave)\n tnet = pattern.last_tnet\n obseq = pattern.obs_seq\n idx = pattern.get_step(twave)\n beats = pattern.evidence[o.QRS]\n qidx = qrsidx + len(beats) if qrsidx < 0 else qrsidx\n qrs = beats[qidx]\n if qidx > 1:\n refsq = beats[qidx - 1].earlystart - beats[qidx - 2].lateend\n tnet.add_constraint(qrs.time, twave.end, Iv(0, max(0, refsq - C.TQ_INTERVAL_MIN)))\n if idx > 0 and isinstance(obseq[idx - 1], o.PWave):\n pwave = obseq[idx - 1]\n tnet.add_constraint(\n pwave.end, twave.start, Iv(C.ST_INTERVAL.start, C.PQ_INTERVAL.end + C.QRS_DUR.end)\n )\n if qidx < len(beats) - 1:\n tnet.set_before(twave.end, beats[qidx + 1].start)\n # ST interval\n tnet.add_constraint(qrs.end, twave.start, C.ST_INTERVAL)\n # QT duration\n tnet.add_constraint(qrs.start, twave.end, C.N_QT_INTERVAL)\n # RT variation\n if qidx % 2 == 0:\n rtmean, rtstd = pattern.hypothesis.meas.rt\n # We also define a constraint on T wave end based on the last\n # distance between normal and ectopic QRS.\n if qidx > 0:\n tnet.add_constraint(\n qrs.end, twave.end, Iv(0, beats[qidx - 1].earlystart - beats[qidx - 2].lateend)\n )\n else:\n rts = _get_measures(pattern, 1)[2]\n rtmean, rtstd = np.mean(rts), np.std(rts)\n if rtmean > 0:\n # The mean and standard deviation of the PQ measurements will\n # influence the following observations.\n maxdiff = C.QT_ERR_STD if len(pattern.evidence[o.TWave]) < 10 else rtstd\n maxdiff = max(maxdiff, C.MIN_QT_STD)\n interv = Iv(int(rtmean - 2.5 * maxdiff), int(rtmean + 2.5 * maxdiff))\n # We avoid possible inconsistencies with constraint introduced by\n # the rhythm information.\n try:\n existing = tnet.get_constraint(qrs.time, twave.end).constraint\n except KeyError:\n existing = Iv(-np.inf, np.inf)\n if interv.overlap(existing):\n tnet.add_constraint(qrs.time, twave.end, interv)\n\n return _t_tconst", "def generateRHS(T, sigma, qdx):\n\n b = T[1:-1]*1./sigma\n # Consider Dirichlet BC\n b[0] += T[0]\n # Consider Neumann BC\n b[-1] += qdx\n\n return b", "def get_bprop_rsqrt(self):\n\n def bprop(x, out, dout):\n grad = F.fill(F.dtype(x), F.shape(x), -0.5) / (F.sqrt(x)*x)\n dx = dout * grad\n return (dx,)\n return bprop", "def my_square(y):\n\treturn (y **2)", "def norm_sqr(x):\n return inner_prod(x, x)[0]", "def eq(w, x):\n return (-w[1]*x - w[0]) / w[2]", "def square_value(s):\n return s ** 2", "def r(o, t):\n return o*t**0.5", "def qr(T):\n Q, R = splinalg.qr(T, mode='economic')\n sR = np.sign(np.real(np.diag(R)))\n sR[sR == 0] = 1\n Q, R = Q * sR, sR.reshape([-1, 1]) * R\n # maxQ, minQ = Q.max(0), Q.min(0)\n # maxR, minR = R.max(1), R.min(1)\n # ind = (np.abs(minQ) > maxQ) & (np.abs(minR) > maxR)\n # Q[:, ind] *= -1\n # R[ind] *= -1\n return Q, R", "def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins", "def isqrt(inputnum):\n if inputnum >= 0:\n return math.sqrt(inputnum)\n else:\n return complex(0, math.sqrt(-inputnum))", "def calc_trig(self, tau):\n if self.A[self.k,self.p] != 0.0:\n if tau > 0:\n t = -tau + np.sqrt(tau**2 + 1.0)\n else:\n t = -tau - np.sqrt(tau**2 + 1.0)\n \n c = 1.0/(1.0 + t**2)\n s = t*c\n else:\n c = 1.0\n s = 0.0\n 
return c, s", "def SqrtNot(d=1):\n\n return Operator(0.5 * np.array([[1 + 1j, 1 - 1j],\n [1 - 1j, 1 + 1j]])).tensor_power(d)", "def g(self, RD):\n g = 1 / np.sqrt((1 + 3 * np.power(self.q, 2)) / np.power(np.pi, 2)) \n \n return g", "def get_w_star(self):\n from .. import physics as phys\n return phys.w_star(self)", "def get_w_star(self):\n from .. import physics as phys\n return phys.w_star(self)", "def get_w_star(self):\n from .. import physics as phys\n return phys.w_star(self)", "def get_w_star(self):\n from .. import physics as phys\n return phys.w_star(self)", "def sqr(f):\n return f.per(dmp_sqr(f.rep, f.lev, f.dom))", "def R_square(self,parameterValues=None):\n sst = self.SST(parameterValues)\n cost = self.Cost(parameterValues)\n return 1.- cost/sst", "def InductionsFromPrescribedCtCq_ST(vr_bar,Ct,Cq,Lambda,bSwirl):\n lambda_r=Lambda*vr_bar\n # --- Stream Tube theory\n a_ST = 1/2*(1-np.sqrt(1-Ct))\n if bSwirl:\n a_prime_ST = Cq/(4*(1-a_ST)*lambda_r)\n # a_prime_ST = 0.5*(sqrt(1+(4*a_ST.*(1-a_ST)./lambda_r.^2))-1);\n else:\n a_prime_ST =0\n return a_ST,a_prime_ST", "def circuit_one_qubit_one_param_rx(inpt):\n qml.RX(inpt[0], wires=0)\n return qml.expval(qml.PauliZ(0))", "def getpower(self):\n return (self.cwt*numpy.conjugate(self.cwt)).real", "def qgset(x):\n return 0.2855*x - 0.8565", "def _get_R(self, net_r_amp):\n return np.abs(net_r_amp)**2", "def sigwt(self):\n return self._return_if('_sigwt')", "def rt_general(lam, T, I_0, tau):\n\n B_lam = planck_w(lam, T)\n tau_total = tau[-1]\n delta_tau = tau[1:] - tau[0:-1]\n integrate_radiation = B_lam[1:] * np.exp(- (tau_total - tau[:])) * delta_tau\n return I_0 * np.exp(-tau) + B_lam * (1 - np.exp(-tau))", "def square(q_1: Q) -> Q:\n\n end_q_type = f\"{q_1.q_type}²\"\n\n qxq = _commuting_products(q_1, q_1)\n\n sq_q = Q(q_type=end_q_type, representation=q_1.representation)\n sq_q.t = qxq[\"tt\"] - qxq[\"xx+yy+zz\"]\n sq_q.x = qxq[\"tx+xt\"]\n sq_q.y = qxq[\"ty+yt\"]\n sq_q.z = qxq[\"tz+zt\"]\n\n return sq_q", "def isqrt(n):\n if n < 0:\n raise ValueError('square root not defined for negative numbers')\n elif n <= MAX_EXACT:\n # For speed, we use floating point maths.\n return int(n**0.5)\n return _isqrt(n)", "def scalar(self):\n return self.q[0]", "def isqrt(n):\r\n x = n\r\n y = (x + 1) // 2\r\n while y < x:\r\n x = y\r\n y = (x + n // x) // 2\r\n return x", "def value(self): \r\n c = self.nd1() * self.s * math.exp(-self.div * self.t)\r\n c -= self.nd2() * self.x * math.exp(-self.rf * self.t)\r\n \r\n return c", "def stump_S(z) :\n\n if z > 0:\n sz = sqrt(z) \n return (sz - sin(sz))/pow(sz,3)\n elif z < 0 :\n s_z = sqrt(-z) \n # According to the equation the denominatori is pow(sqrt(z),3)\n return (sinh(s_z) - s_z)/pow(s_z,3)\n else :\n return 0.1666666666666666", "def twiny(self):\n return self.altx()", "def T(self, q = np.zeros(1) , dq = np.zeros(1) , ddq = np.zeros(1) , R = 1 ): \n \n F = self.F( q , dq , ddq )\n \n Tl = self.Tlosses( dq , ddq )\n \n T = np.dot( 1. 
/ R , F ) + np.dot( R , Tl ) \n \n return T", "def R_square(self,parameterValues):\n sst = self.SST(parameterValues)\n cost = self.Cost(parameterValues)\n return 1.- cost/sst", "def _q_x(self):\n lambda_r = self.latt_par['lambda_r'].value \n return 2*np.pi*self.k/lambda_r", "def my_square(y):\n\treturn(y ** 2)", "def get_sqrt_2():\n return 1.41421356", "def twinx(self):\n return self.alty()", "def q_wi_per_head(self):\n assert self.ff % self.heads == 0\n # fuses W and V from swiGLU, and the q computation\n # note both W and V have output dim .ff, not\n # 2/3 ff as in https://arxiv.org/pdf/2002.05202.pdf\n return (self.ff * 2 // (self.heads-self.padded_heads)) + self.qkv", "def get_wave(q):\n\n approximant = 'SEOBNRv4'\n chi1 = [0,0,0]\n chi2 = [0,0,0]\n deltaTOverM = 0.1\n omega0 = 2e-2\n\n t, h = LALPy.generate_LAL_waveform(approximant, q, chi1, chi2, deltaTOverM, omega0)\n\n Amp = np.abs(h)\n peakIdx = np.argmax(Amp)\n\n t -= t[peakIdx]\n\n tmin = -500\n if min(t) > tmin:\n raise Exception('Data not long enough, decrease omega0.')\n keepIdx = t - tmin > -1e-3 # simple hack to ensure t_vec is always nearly the same\n t = t[keepIdx]\n h = h[keepIdx]\n\n tmax = 100\n keepIdx = t - tmax < 1e-3\n t = t[keepIdx]\n h = h[keepIdx]\n\n return t, h", "def my_square(x):\n return x ** 2", "def circuit_one_qubit_two_params(inpt):\n qml.RY(inpt[0], wires=0)\n qml.RX(inpt[1], wires=0)\n return qml.expval(qml.PauliZ(0))", "def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))", "def square_wave(self, sqw=0, out=0):\n rs0 = 1 if sqw == 4 or sqw == 32 else 0\n rs1 = 1 if sqw == 8 or sqw == 32 else 0\n out = 1 if out > 0 else 0\n sqw = 1 if sqw > 0 else 0\n reg = rs0 | rs1 << 1 | sqw << 4 | out << 7\n self.i2c.writeto_mem(self.addr, CONTROL_REG, bytearray([reg]))" ]
[ "0.6371148", "0.63678783", "0.627648", "0.6267146", "0.6152248", "0.61319304", "0.6127212", "0.60720533", "0.6038319", "0.603076", "0.6025137", "0.5983519", "0.5961708", "0.58924997", "0.58151203", "0.5788785", "0.57599264", "0.5748499", "0.57308507", "0.571389", "0.57128286", "0.57062155", "0.56962067", "0.56694174", "0.56528324", "0.5638817", "0.561916", "0.56122494", "0.55726075", "0.5554412", "0.5547062", "0.55451196", "0.55302745", "0.5529572", "0.5515289", "0.5514569", "0.5495782", "0.54783416", "0.5459023", "0.54534745", "0.544047", "0.54285187", "0.54254854", "0.5421294", "0.5416244", "0.5395194", "0.5391166", "0.5387137", "0.53869873", "0.538148", "0.5378066", "0.53780574", "0.53719145", "0.53645444", "0.5355413", "0.53499997", "0.5349689", "0.5345791", "0.5344556", "0.5336816", "0.53339106", "0.53318846", "0.53305787", "0.5325444", "0.5324514", "0.5320403", "0.53199834", "0.5315532", "0.5301437", "0.5301437", "0.5301437", "0.5301437", "0.5299951", "0.52982986", "0.52895176", "0.5271945", "0.5271368", "0.5270121", "0.5269384", "0.52683145", "0.52646893", "0.5262862", "0.525882", "0.5253435", "0.5251187", "0.5241679", "0.52333784", "0.52304095", "0.52293", "0.52224964", "0.52216727", "0.5216328", "0.52012026", "0.5198641", "0.5194478", "0.5193271", "0.51880175", "0.51785725", "0.51682943", "0.51657903" ]
0.54470795
40
Constructor parent reference to the parent widget QWidget
def __init__(self, parent=None): super(ProgressDlg, self).__init__(parent) self.setupUi(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, parent):\n self.widget = QtCore.QObject(parent)", "def __init__(self, parent=None):\n super(Dialog, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(QCTP, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(Representative, self).__init__(parent)\n self.setupUi(self)", "def create(self, parent):\n self.widget = _QMainWindow(parent)", "def __init__(self, parent):\n self.parent = parent\n self.dialog = None", "def __init__(self, parent):\n QtGui.QMenu.__init__(self, parent)\n self.parent = parent", "def __init__(self, printer, parent=None):\n QtGui.QWidget.__init__(self, printer, parent)", "def __init__(self, parent=None):\n self._window = None\n\n self.setup_ui()", "def __init__(self, parent=None):\n super(union_Dialog, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(Inj, self).__init__(parent)\n self.setupUi(self)", "def create(self, parent):\n self.widget = QFrame(parent)", "def createWidget(self, parent):\n raise NotImplementedError()", "def __init__(self, parent):\r\n\r\n BasicDialog.__init__(self, parent, title=None)", "def __init__(self, parent):\r\n Frame.__init__(self, parent) \r\n \r\n self.parent = parent\r\n self.initUI()", "def __init__(self, parent=None):\n super(StyledInputDialog, self).__init__(parent)\n self.setupUi(self)\n self.input = None", "def __init__(self, parent):\n pass", "def __init__(self, parent):\n pass", "def __init__(self, parent):\n pass", "def __init__(self, parent):\n self._parent = parent", "def __init__(self, parent=None):\n super(RobotSelection, self).__init__(parent)\n self.parent = parent\n self.initUI()", "def __init__(self, parent: View):\n self.parent = parent\n self.root = self.parent.root\n # Content frame\n self.frame = tk.Frame(self.parent.frame)\n # Reference\n self.visible = False", "def __init__(self, parent):", "def __init__(self, parent=None):\n super(QAccountWidget, self).__init__(parent)\n self.setupUi(self)", "def create(self, parent):\n self.widget = QtGui.QScrollArea(parent)", "def __init__(self, parent=None):\n super().__init__(parent)\n\n topLayout = QtGui.QVBoxLayout(self)\n self.setLayout(topLayout)\n self.setTitle(_('Choose export format subtype'))\n self.subtypeButtons = QtGui.QButtonGroup(self)\n self.subtypeButtons.buttonClicked[int].connect(self.setCurrentSubtype)", "def __init__(self):\n self.stack = QWidget()", "def __init__(self, parent=None):\n # Inherited from QMainWindow\n if sys.platform == 'darwin':\n # Workaround for Qt issue on OS X that causes QMainWindow to\n # hide when adding QToolBar, see\n # https://bugreports.qt-project.org/browse/QTBUG-4300\n super(BpMainWindow, self).__init__(parent, Qt.MacWindowToolBarButtonHint)\n else:\n super(BpMainWindow, self).__init__(parent)\n\n # temporary variable\n 
self._temp_dir = None\n self.is_save_configure = False\n\n # pre-define a model variable\n self.model = None", "def __init__(self, parent: QWidget):\n super().__init__(parent)\n DiagramFieldView.__diagram_field = self\n\n self.__list: List[DiagramView] = []\n self.__dialog: Dialog = None\n self.__diagram_layout: QVBoxLayout = QVBoxLayout()\n self.__button_layout: QHBoxLayout = QHBoxLayout()\n self.__start_button: StartButtonView = StartButtonView()\n self.__maximize_button: QPushButton = QPushButton()\n\n self.__diagram_group: QtWidgets.QGroupBox = QtWidgets.QGroupBox(self)\n self.__group_layout: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout(self.__diagram_group)\n self.__stretch_widget: QtWidgets.QWidget = QtWidgets.QWidget(self)\n self.__diagram_count: int = 0\n\n self.__start_button.start_signal.connect(self.__clear_diagrams)\n self.__maximize_button.clicked.connect(self.__maximize_on_click)\n ManagerModel.set_diagram_notifier(self)\n self.__init_ui()", "def __init__(self, parent=None):\n super(SelfCarryAddressSeek, self).__init__(parent)\n self.setupUi(self)", "def create(self, parent):\n self.widget = QImageView(parent)", "def __init__(self, window: QWidget, parent=None):\n QWidget.__init__(self, parent)\n\n self._window = window\n self._mousePressed = False", "def __init__(self, parent):\n QtGui.QWidget.__init__(self, parent)\n self.ui = Ui_FindReplaceForm()\n self.ui.setupUi(self)\n\n self.ui.errorLabel.setText(\"\")\n\n self.ui.textToFind.textChanged.connect(self.text_to_find_changed)\n self.ui.textToFind.textChanged.connect(self.validate_regexp)\n\n self.ui.regexCheckBox.toggled.connect(self.regexp_selected)\n\n self.ui.findButton.clicked.connect(self.find)\n self.ui.closeButton.clicked.connect(parent.close)\n\n self.ui.replaceButton.clicked.connect(self.replace)\n self.ui.replaceAllButton.clicked.connect(self.replace_all)\n\n self.textedit = None\n self.regexp = QtCore.QRegExp()\n self.textcursor = None", "def __init__(self, parent, frame):\n\t\tself.frame = frame\n\n\t\t# Populate line edit with shot name\n\t\tself.frame.shot_lineEdit.setText(parent.self_name)", "def __init__(self, parent, **kwargs):\n PyGlassWidget.__init__(self, parent, **kwargs)\n\n self.deployBtn.clicked.connect(self._handleDeployClick)\n self.cancelBtn.clicked.connect(self._handleCancelClick)\n\n self._canceled = True\n self._includeEmails = False\n self._buildMessage = u''", "def __init__(self, parent):\n super(MasterDialog, self).__init__(parent)\n self.setupUi(self)\n self.setAttribute(Qt.WA_DeleteOnClose)\n self.ms = parent\n self.devenvironment = self.ms.devenvironment\n if not self.devenvironment:\n self.setCursor(Qt.BlankCursor)\n self.fill_combobox()\n self.employee_id = 0\n # connect the buttons\n self.PB_change.clicked.connect(self.change_clicked)\n self.PB_back.clicked.connect(self.back_clicked)\n self.CB_employee.activated.connect(self.combobox_change)\n self.LE_first_name.clicked.connect(lambda: self.ms.lineedit_clicked(self.LE_first_name))\n self.LE_last_name.clicked.connect(lambda: self.ms.lineedit_clicked(self.LE_last_name))", "def __init__(self, parent=None):\n super(CommentDialog, self).__init__(parent)\n self.createDialog()\n self.createConnections()", "def __init__(self, *args, **kwargs):\n\n\t\t\tLOGGER.debug(\"> Initializing '{0}()' class.\".format(self.__class__.__name__))\n\n\t\t\tsuper(QWidget, self).__init__(*args, **kwargs)\n\n\t\t\tself.__uiFile = file\n\n\t\t\tself.__geometry = None\n\n\t\t\tself.setupUi(self)", "def __init__(self, parent=None):\n super(MainWindow, 
self).__init__(parent)\n self.setupUi(self)\n self.path=''\n self.bool=0\n self.child=0\n self.click=\" \"", "def __init__(self, parent):\r\n\r\n pre = wx.PrePyWindow()\r\n \r\n self._tabs = None\r\n self._rect = wx.Rect(0, 0, 200, 200)\r\n self._tab_ctrl_height = 20\r\n self._tab_rect = wx.Rect() \r\n self._parent = parent\r\n \r\n self.PostCreate(pre)", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n \n self.setWindowTitle(\"RPI HMI - pH Debug\") # Title creation", "def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.setText(\"The document has been modified.\")\n self.setInformativeText(\"What do you want to do?\")\n self.setStandardButtons(QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard |\n QtGui.QMessageBox.Cancel)\n self.setDefaultButton(QtGui.QMessageBox.Save)\n self.setWindowTitle(\"Changes have been made\")\n self.setWindowIcon(QtGui.QIcon(\"data/images/question.png\"))", "def __init__(self, parent=None):\n super().__init__()\n\n self.parent = parent\n\n # plot object, can be 2D or 3D\n self.plt = None", "def __init__(self, *args, **kwargs):\n\n super(FlirCameraWidget, self).__init__(*args, **kwargs)\n #self.loadForm()\n self.window = Ui_MainWindow()\n self.window.setupUi(self)\n\n self.initUI()\n Styles(self)", "def __init__(self, parent):\n # parent is the main frame of PyCorrFit\n self.parent = parent\n ## MYID\n # This ID is given by the parent for an instance of this class\n self.MyID = None\n ## Wrapping\n curvedict, labels = self.GetCurvedict()\n self.labels = labels\n self.Selector = UserSelectCurves(parent, curvedict,\n wrapper=self, labels=labels)\n # This is necessary for parent to deselect and select the tool\n # in the tools menu.\n self.Bind = self.Selector.Bind\n if self.parent.notebook.GetPageCount() == 0:\n self.Selector.sp.Disable()", "def __init__(self, parent=None):\n super().__init__(parent)\n # print(self.__dict__.keys())\n\n # print(self.__dict__.keys(), '\\n\\n')\n #\n # print(self.__dict__['menu'].__dict__.keys())\n # print(self.__dict__['menu']['leftMenu'])\n # self._viewbox.fftCheck.setObjectName(\"fftCheck\")\n\n # self.viewAll = QtGui.QRadioButton(\"Vue d\\'ensemble\")\n # self.viewAll.triggered.connect(self.autoRange)\n # self.menu.addAction(self.viewAll)\n # print(self.menu.__dict__['leftMenu'].__dict__)", "def __init__(self, parent=None):\n QLabel.__init__(self, parent)\n self.start_animation(self.SLOW_DURATION)", "def __init__(self, parent):\r\n\r\n AuiDockingGuide.__init__(self, parent, style=wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP |\r\n wx.FRAME_NO_TASKBAR | wx.NO_BORDER | wx.FRAME_SHAPED,\r\n name=\"auiCenterDockTarget\")\r\n\r\n self.Hide()\r\n\r\n self.CreateShapesWithStyle()\r\n self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)\r\n \r\n if wx.Platform == \"__WXGTK__\":\r\n self.Bind(wx.EVT_WINDOW_CREATE, self.SetGuideShape)\r\n else:\r\n self.SetGuideShape()\r\n \r\n self.SetSize(self.region.GetBox().GetSize())\r\n\r\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)\r\n self.Bind(wx.EVT_PAINT, self.OnPaint)", "def __init__(self, parent = None):\n QObject.__init__(self, parent)\n self.overlay = None# assume overlay does not exist\n self.box_coordinates = [0,0,0,0]", "def __init__(self, parent):\n self.parent = parent\n myStyle = wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER|wx.TAB_TRAVERSAL\n wx.Dialog.__init__(self, None, wx.ID_ANY, '%s - Choose your colors' % globs.myName, style=myStyle)\n\n self._initialize()\n\n self.panel1.SetSizerAndFit(self.topBoxSizer)\n 
self.SetClientSize(self.topBoxSizer.GetSize())\n self.Centre()", "def init_widget(self):", "def create(self, parent):\n self.widget = QtGui.QTreeView(parent)", "def __init__(self, parent=None):\n super(addOne, self).__init__(parent)\n self.setupUi(self)\n \n # 内部自定义初始化操作 ---------------------------------------------------------------------begin\n self.zShow = Fdebug(self);\n self.zShow.show();\n self.verticalLayout.insertWidget(0, self.zShow)", "def __init__(self, parent, message):\n\n\t\tself.parent = parent\t\t\t\t# Main window\n\t\tself.message = message \t\t\t\t# Error message\n\t\t# Creat GUI\n\t\tself.initGUI()", "def __init__(self, scene, parent=None, flags=Qt.WindowFlags()):\n super(CustomQFrame, self).__init__(parent=parent, flags=flags)\n self.scene = scene\n self.parent = parent", "def __init__(self):\n super().__init__() # Call the superclass constructor\n self.setupUi(self) # Run the code that creates the UI layout\n self.saveButton.clicked.connect(self.save_change)\n self.pushButton.clicked.connect(self.go_back)", "def _setparent(self, parent):\n\t\tself.parent = parent\n\t\tif self.parent is not None:\n\t\t\tself.parent.components.add(self)", "def __init__(self, parent):\n self.name = \"Base.View\"\n self.parent = parent\n self.Main = parent.Main", "def __init__(self, parent=None):\n super().__init__(parent)\n\n topLayout = QtGui.QVBoxLayout(self)\n self.setLayout(topLayout)\n self.setTitle(_('Choose export format type'))\n\n typeButtons = QtGui.QButtonGroup(self)\n for id, exportType in enumerate(ExportDialog.exportTypes):\n button = QtGui.QRadioButton(ExportDialog.\n exportTypeDescript[exportType])\n typeButtons.addButton(button, id)\n topLayout.addWidget(button)\n if exportType == ExportDialog.currentType:\n button.setChecked(True)\n typeButtons.buttonClicked[int].connect(self.setCurrentType)", "def createWidget(self, QWidget): # real signature unknown; restored from __doc__\n pass", "def __init__(self):\n Form, Window = uic.loadUiType(\"Visuals/QtFiles/ConfirmationMenu.ui\")\n self.window = Window()\n self.form = Form()\n self.form.setupUi(self.window)\n self.centre = self.window.findChild(QWidget, \"centralwidget\")", "def __init__(self, parent: View):\n super().__init__(parent)", "def __init__(self, parent=None):\n super().__init__(parent);\n tabBar=EditableTabBar(parent);\n self.setTabBar(tabBar);", "def __init__(self, parent):\n super(StageInterface, self).__init__(parent)\n\n # Make the sizer\n self.sizer = wx.BoxSizer(wx.HORIZONTAL)\n\n # Make the toolbar, then add it to the sizer\n self.tb = wx.ToolBar(self, style=wx.TB_VERTICAL)\n self.sizer.Add(self.tb, 0, wx.EXPAND)\n\n # Make the visualizer\n self.set_visualization(StageVisualizer(self))\n\n # We want to see what's in the sizer\n self.SetSizer(self.sizer)", "def __init__(self):\n self.view = GuiView(self)\n return", "def __init__(self, parent):\n super().__init__(parent=parent)\n self.setupUi(self)\n self.tableView.setContextMenuPolicy(Qt.CustomContextMenu)\n self.tableView.customContextMenuRequested.connect(self._show_context_menu)\n self.tableView.setSelectionBehavior(QAbstractItemView.SelectItems)", "def __init__(self, parent, **kwargs):\n super(bubblePyHomeWidget, self).__init__(parent, **kwargs)\n self._firstView = True\n\n self.OneBubbleBtn.clicked.connect(self._handleOneBubble)\n self.ManyBubblesBtn.clicked.connect(self._handleManyBubbles)\n\n self._statusBox, statusLayout = self._createElementWidget(self, QtGui.QVBoxLayout, True)\n statusLayout.addStretch()\n\n self._nimbleStatus = 
NimbleStatusElement(\n self._statusBox,\n disabled=self.mainWindow.appConfig.get(UserConfigEnum.NIMBLE_TEST_STATUS, True) )\n statusLayout.addWidget(self._nimbleStatus)", "def __init__(self, parent):\n FindReplaceDialog.__init__(self, parent)\n self.ui.findReplaceForm.hide_replace_widgets()\n self.setWindowTitle(self.tr(\"Find\"))", "def __init__(self, parent: View):\n super().__init__(parent)\n # Crossword title\n self.title = tk.StringVar(self.root)\n self.title_label = tk.Label(self.frame, textvariable=self.title)\n # Crossword author\n self.author = tk.StringVar(self.root)\n self.author_label = tk.Label(self.frame, textvariable=self.author)\n # Dividing line separating the header and other groups\n self.separator = tk.Frame(self.frame)\n # Load\n self.load()", "def __init__(self, parent):\n QtGui.QDialog.__init__(self, parent)\n self.parent = parent\n self.ui = Ui_FileSelectDialog()\n self.ui.setupUi(self)\n mneRoot = os.environ.get('MNE_ROOT', '')\n if mneRoot == \"\":\n mneRoot = self.settings.value(\"MNE_ROOT\", \"\").toString()\n self.ui.lineEditMneRoot.setText(mneRoot)\n self.show()", "def widget(self, p_int): # real signature unknown; restored from __doc__\n return QWidget", "def __init__(self, parent, title=\"Plot\"):\n super(Dialog3DPlot, self).__init__(parent)\n self.setWindowTitle(title)\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n self.layout = QtGui.QHBoxLayout(self)\n self.mayavi = MayaviViewer(self)\n self.layout.addWidget(self.mayavi)\n self.messenger = Messenger()", "def __init__(self,\n\t label:str=None,\n\t variable_name:str=None,\n\t value:typing.Any=None,\n\t parent:QtWidgets.QWidget=None,\n\t on_change:typing.Callable=None):\n\t\tQtWidgets.QWidget.__init__(self, parent=parent)\n\n\t\tif label is None:\n\t\t\tif variable_name is None:\n\t\t\t\tlabel = \"\"\n\t\t\telse:\n\t\t\t\tlabel = app.translator(variable_name)\n\n\t\tself._make_label_widget(label)\n\t\tself.layout = self._formset()\n\t\tself.setLayout(self.layout)\n\t\tself.label = label\n\n\t\tValueMixin.__init__(self, variable_name=variable_name, on_change=on_change, value=value)", "def __init__(self, parent):\n super(sppasFeedbackDialog, self).__init__(\n parent=parent,\n title='{:s} Feedback'.format(sg.__name__),\n style=wx.DEFAULT_FRAME_STYLE)\n\n self.CreateHeader(MSG_HEADER_FEEDBACK, icon_name=\"mail-at\")\n self._create_content()\n self._create_buttons()\n self.Bind(wx.EVT_BUTTON, self._process_event)\n\n self.SetMinSize(wx.Size(480, 320))\n self.LayoutComponents()\n self.CenterOnParent()\n self.FadeIn(deltaN=-8)", "def __init__(self, parent=None):\n super(GUIForm, self).__init__()\n self.ui = Ui_MainWindow()\n self.Summarizer = TextRankSummarizer(\"english\")\n self.ui.setupUi(self)\n self.ui.plainTextEdit.textChanged.connect(self.edit_text)\n self.ui.actionOpen_text_file.triggered.connect(self.open_file_dialog)\n self.ui.actionOpen_text_from_url.triggered.connect(self.open_url)\n self.ui.pushButton.clicked.connect(self.summarize)\n self.ui.plainTextEdit.textChanged.connect(self.edit_text)\n self.ui.spinBox.setMinimum(0)\n self.ui.spinBox.setMaximum(0)\n self.ui.actionFile_menu_help.triggered.connect(\n self.show_file_menu_help)\n self.ui.actionGeneral_help_2.triggered.connect(self.show_general_help)\n self.ui.actionSummarization_help.triggered.connect(\n self.show_summary_help)", "def create_widget(self):\n pass", "def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Ciné Club\")\n self.setup_ui() # Ajout des Widgets.\n self.populate_movies()\n 
self.setup_connexions() # Création des connexion entre widgets.", "def initialize(self):\n super(QtBaseWidgetComponent, self).initialize()", "def __init__(self, parent=None):\n super(E5PathPicker, self).__init__(parent, useLineEdit=True)", "def _set_parent(self, parent):\n self.__parent = parent", "def __init__(self,*args, **kwargs):\n # super(FCmbMainWindow, self).__init__(*args, **kwargs)\n # self.setParent(mayaMainWindow) # ���´�������Ϊmaya���Ӽ�\n # self.setWindowFlags(Qt.Window)\n # self.setupUi(self) # ���и����ʼ������\n # self.connectSignals()\n\n\n super(FCmbMainWindow, self).__init__(*args, **kwargs)\n self.setParent(mayaMainWindow)\n self.setWindowFlags(Qt.Window)\n self.setupUi(self)", "def __init__(self, parent):\n\n super().__init__()\n\n self.color_depth = parent.color_depth\n self.original_hist = parent.calc_histogram()['b']\n self.img_data = parent.data.copy()\n self.current_img_data = None\n\n self.init_ui(self, [self.img_data.min(), self.img_data.max()])\n self.label_txt.setText(\"Choose the range for normalization:\")\n self.setWindowTitle(\"Normalize\")\n\n self.range_slider.left_value_changed.connect(self.update_left_value)\n self.range_slider.right_value_changed.connect(self.update_right_value)\n self.range_slider.range_chagned.connect(self.update_plot_preview)\n\n self.update_left_value()\n self.update_right_value()\n self.update_plot_preview()", "def __init__(self, ui, parent=None):\n super(LogViewer, self).__init__(parent)\n \n self.setWindowIcon(UI.PixmapCache.getIcon(\"eric.png\"))\n \n self.__ui = ui\n \n self.__logViewer = LogViewerEdit(self)\n from .SearchWidget import SearchWidget\n self.__searchWidget = SearchWidget(self.__logViewer, self)\n self.__searchWidget.setSizePolicy(\n QSizePolicy.Fixed, QSizePolicy.Preferred)\n self.__searchWidget.hide()\n \n self.__layout = QHBoxLayout(self)\n self.__layout.setContentsMargins(1, 1, 1, 1)\n self.__layout.addWidget(self.__logViewer)\n self.__layout.addWidget(self.__searchWidget)\n \n self.__searchWidget.searchNext.connect(self.__logViewer.searchNext)\n self.__searchWidget.searchPrevious.connect(self.__logViewer.searchPrev)\n self.__logViewer.searchStringFound.connect(\n self.__searchWidget.searchStringFound)", "def __init__(self, treeView, isChildView=True, parent=None):\n super().__init__(parent)\n self.treeView = treeView\n self.isChildView = isChildView\n self.hideChildView = not globalref.genOptions['InitShowChildPane']\n self.setAcceptRichText(False)\n self.setLineWrapMode(QTextEdit.NoWrap)\n self.setTabChangesFocus(True)\n self.setUndoRedoEnabled(False)\n self.treeSelectAction = QAction(_('Select in Tree'), self)\n self.treeSelectAction.triggered.connect(self.selectLineInTree)\n self.textChanged.connect(self.readChange)", "def parent(self, parent):\n\n self._parent = parent", "def parent(self, parent):\n\n self._parent = parent", "def parent(self, parent):\n\n self._parent = parent", "def parent(self, parent):\n\n self._parent = parent", "def __init__(self, form: dict, help: str, parent=None, is_multi_cif=False):\n super().__init__(parent)\n self.is_multi_cif = is_multi_cif\n self.setParent(parent)\n self.form = form\n # self.setMinimumWidth(400)\n self.mainVLayout = QVBoxLayout(self)\n self.setLayout(self.mainVLayout)\n # self.setStyleSheet('QWidget { border: 2px solid black }')\n self.mainVLayout.setContentsMargins(0, 0, 0, 0)\n self.mainVLayout.setSpacing(0)\n self.mainVLayout.addWidget(QHLine())\n # The button to get help for the respective alert:\n self.helpbutton = QPushButton('Help')\n 
self.helpbutton.clicked.connect(self.show_help)\n self.response_text_edit = QTextEdit()\n self.alert_label_box()\n self.problem_label_box()\n self.response_label_box()\n self.setAutoFillBackground(False)\n self.help = help\n #\n self.show()", "def __init__(self, parent=None):\n super(E5ComboPathPicker, self).__init__(parent, useLineEdit=False)", "def __init__(self, parent=None):\n super(yQGeoMagFieldSettingDialog, self).__init__(parent)\n self.setupUi(self)\n self.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.accept = False", "def __init__(self, parent):\n super(DummyStageInterface, self).__init__(parent)\n\n open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR,\n self.tb_size)\n self.tb.AddLabelTool(wx.ID_OPEN, \"Open\", open_bmp)\n\n self.tb.Realize()", "def create(self, parent):\n self.widget = wxBitmapWidget(parent)" ]
[ "0.80446535", "0.790637", "0.7829028", "0.7829028", "0.7818371", "0.770407", "0.770407", "0.770407", "0.770407", "0.770407", "0.76782995", "0.76558757", "0.76455885", "0.7594716", "0.7536031", "0.75227404", "0.751839", "0.7513868", "0.7468696", "0.7376986", "0.736306", "0.73554814", "0.7326882", "0.7282546", "0.7282546", "0.7282546", "0.72487694", "0.7183207", "0.7129338", "0.7121242", "0.70437986", "0.699516", "0.6994315", "0.69738275", "0.6963427", "0.6958084", "0.6949107", "0.6894904", "0.68611526", "0.68461186", "0.68147963", "0.6814142", "0.67745167", "0.6774384", "0.6772228", "0.6710709", "0.6692361", "0.66772085", "0.6663515", "0.6657969", "0.66513664", "0.6648562", "0.6641199", "0.6637269", "0.6637215", "0.6633686", "0.6633576", "0.6615191", "0.6575982", "0.6569227", "0.6546616", "0.6538472", "0.6532263", "0.65206826", "0.6509241", "0.65015864", "0.65014327", "0.6490079", "0.64487976", "0.6446116", "0.6440754", "0.6434976", "0.6432431", "0.64320654", "0.64289016", "0.6419205", "0.63946325", "0.63858664", "0.638552", "0.6365156", "0.6360151", "0.63587075", "0.6354245", "0.63523525", "0.6345173", "0.6340043", "0.63308144", "0.6317093", "0.6311447", "0.6310911", "0.6308296", "0.6307222", "0.6307222", "0.6307222", "0.6307222", "0.6295391", "0.6292048", "0.6284787", "0.628297", "0.6273498" ]
0.74049985
19
Construct a new Player
def __init__(self): self.loop = asyncio.get_event_loop() self.aiohttp = web.Application( loop=self.loop, middlewares=[unhandled_route], ) self.client = ClientSession() self.ws = WebSocketHandler(self) self.cert = self._load_ssl_certificate() self.config()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createPlayer(self):\n sw, ne = self.playerCreationRectangle\n x = self.random.randrange(sw.x, ne.x)\n y = 1.0\n z = self.random.randrange(sw.y, ne.y)\n player = Player(Vector(x, y, z), 2, self.seconds)\n for observer in self.observers:\n observer.playerCreated(player)\n self.players.append(player)\n return player", "def newPlayer():\r\n pass", "def create_player(\n self, plr_id, last_name, first_name, position,\n alternate_last_names=[], alternate_first_names=[],\n alternate_positions=[], capfriendly_id=None):\n # initiliazing player object\n # TODO: remove alternate options (if necessary)\n plr = Player(\n plr_id, last_name, first_name, position,\n alternate_last_names=alternate_last_names,\n alternate_first_names=alternate_first_names,\n alternate_positions=alternate_positions)\n if capfriendly_id:\n plr.capfriendly_id = capfriendly_id\n\n commit_db_item(plr, True)\n\n return Player.find_by_id(plr_id)", "def __init__(self, player_name, player_number, player_position):\n self.name = player_name\n self.number = player_number\n self.position = player_position", "def __init__(self, player):\n\t\tself.player = player", "def __init__(self, player):\n self.player = player", "def create_player(renderer: sdl2.render.SDL_Renderer) -> Entity:\n player = Entity()\n player.position = Vec2f(\n config.SCREEN_WIDTH / 2,\n config.SCREEN_HEIGHT - PLAYER_SIZE / 2)\n player.active = True\n player.tag = \"player\"\n\n sprite_renderer = components.SpriteRenderer(\n renderer, player, \"sprites/player.bmp\")\n player.add_component(sprite_renderer)\n\n keyboard_mover = components.KeyboardMover(player, PLAYER_SPEED)\n player.add_component(keyboard_mover)\n\n keyboard_shooter = components.KeyboardShooter(player, Player_shot_cooldown)\n player.add_component(keyboard_shooter)\n return player", "def __init__(self,player,name,playerFn):\n\t\tself.player = player\n\t\tself.name = name\n\t\tself.playerFn = playerFn", "def create_player(self, players, player_no, start_tile):\n if player_no in players.keys():\n raise ValueError(f\"Invalid map layout. 
Can't create player no {player_no} as it already exists.\")\n players[player_no] = Player(player_no, start_tile, self)\n return players[player_no]", "def create_player_at(self, x, y):\n cell_size = self.map.get_cell_size()\n obj = Can(\n parent=self.map,\n style={\n 'width': cell_size, \n 'height': cell_size * 2, \n 'z-index': layers['object'] }\n )\n # I am a player\n player(obj)\n # I am a movable character\n make_character(self, obj, \n on_move=lambda dir: obj.play_moving(duration=0.2, loop=True),\n on_stop=lambda dir: obj.reset_animations())\n # I could be destroyed by bombs\n make_breakable(self, obj, \n on_die=lambda: self.game_lose())\n # I can put bombs\n make_bomber(self, obj)\n\n self.map.add_node(obj, x, y, 0, -cell_size)\n return obj", "def __init__(self, *args, **kwargs):\n super(Player, self).__init__(*args, **kwargs)", "def __new__(cls, player, data):\n track = player.get_current()\n \n self = object.__new__(cls)\n self.player = player\n self.track = track\n return self", "def create_random_player(name=\"\", level=0, race=RACE.NONE, sex=SEX.NONE, way=WAY.NONE):\n if not name and name != \"\":\n log.bug(\"name non è un parametro valido: %r\" % name)\n return\n\n if level < 0 or level > config.max_level:\n log.bug(\"level non è un parametro valido: %d\" % level)\n return\n\n if not race:\n log.bug(\"race non è un parametro valido: %r\" % race)\n return\n\n if not sex:\n log.bug(\"sex non è un parametro valido: %r\" % sex)\n return\n\n if not way:\n log.bug(\"way non è un parametro valido: %r\" % way)\n return\n\n # -------------------------------------------------------------------------\n\n player = Player()\n player = create_random_mob(player, name, level, race, sex, way)\n\n # Ora che player possiede una razza ed un sesso può creare un nome\n # casuale se questo non è stato passato\n if not player.name:\n player.name = create_random_name(player.race, player.sex, is_player_name=True)\n player.code = remove_colors(player.name.lower())\n\n # Crea il giocatore con i dati di base\n # (TD) dovrei impostare casualmente tanti altri attributi\n player.flags.randomize()\n create_random_reputations(player)\n\n return player", "def test_create_player(self):\n self.assertIsInstance(self.player, ship.Ship)\n self.assertEqual(self.player.position, constants.PLAYER_START_PLACE)\n self.assertEqual(self.player.width, constants.PLAYER_WIDTH)\n self.assertEqual(self.player.height, constants.PLAYER_HEIGHT)\n self.assertEqual(self.player.img, constants.PLAYER_IMG)\n self.assertEqual(self.player.health, constants.PLAYER_HEALTH)", "def __init__(self, player1: Player.Player, player2: Player.Player):\n\n self.player1 = player1 #: Player\n self.player2 = player2 #: Player\n\n self.board = Board.Board()", "def __init__(self, player_name):\n self._player_name = player_name\n self._hand = Deck() \n self._coder = Deck()", "def build_player(self):\n\t\t\n\t\tclear_screen()\n\n\t\tprint(\"Let's build your character before starting.\")\n\t\t\n\t\tpress_enter()\n\t\tclear_screen()\n\n\t\ta = input('What is the name of your character? ')\n\t\tb = input('What is the Race of your character? 
')\n\n\t\tself.info['Name'] = a.title()\n\t\tself.info['Race'] = b.title()\n\n\t\tclear_screen()\n\n\t\tprint('You have successfully created {} the {}.'.format(a.title(), b.title()))\n\t\tprint('You will begin with {} Hit Points and {} Gold Pieces.'.format(self.stats['HP'], \n\t\t\tself.stats['GOLD']))\n\t\tprint('\\nIt\\'s time to enter the dungeon!')\n\n\t\tpress_enter()", "def player_from_raw(data: Dict[str, Any]) -> andesite.Player:\n return build_from_raw(andesite.Player, data)", "def __init__(self, width=640, height=480, fps=60):\n self.width = width\n self.height = width\n self.fps = fps\n self.size = width, height\n\n self.graphics = graphics.Graphics(self.size)\n self.clock = pygame.time.Clock()\n\n self.player = Player(self.graphics, 320, 240)", "def create_player(id_player: str):\n id_player = str(id_player)\n last_name = input(\"Last name of the player : \")\n first_name = input(\"First name of the player : \")\n birthday = input(\"Birthday of the player : \")\n sex = input(\"Sex of the player : \")\n elo = int(input(\"Elo of the player: \"))\n\n if not Player.get(id_player):\n Player(id_player, last_name, first_name, birthday, sex, elo)\n else:\n raise Exception(f\"The ID {id_player} already exists : {Player.get(id_player)}\")", "def __init__(self, players):\n\n self._players = players\n self._game = None", "def spawn_players(self) -> None:\n #Create the player\n self.player1 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width//(3/2), self.screen_height-50, self.player_lives, self.fps, self.player1_bullet, Direction.UP, self.debug)\n\n #Create the AI\n self.player2 = AIPlayer(self.sensitivity, self.screen_width, self.screen_height, self.screen_width//3, self.screen_height-50, self.player_lives, self.fps, self.player2_bullet, Direction.UP, 1, True, self.debug)", "def __init__(self, game, player):\n self.game = game\n self.player = player", "def get_player(self, num):\n\n name = input(f\"What is the name for player number {num}? 
\")\n player = Player(name)\n return player", "def __init__(self, number_players=1000):\n self.player_list = []\n for i in range(number_players):\n self.player_list.append(Player())", "def create_player(self, request):\n if request.player_name:\n if Player.query(Player.name == request.player_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n else:\n raise endpoints.BadRequestException('verify the name that you are sending in the request')\n if request.email:\n if gameutils.get_regex(request.email) == None:\n print(' ERROR - invalid email, please try again')\n raise endpoints.ConflictException(\n 'invalid email, please try again!')\n else:\n raise endpoints.BadRequestException('verify the email that you are sending in the request')\n\n player = Player(name=request.player_name, email=request.email)\n player.put()\n\n return StringMessage(message='Player created!'.format(request.player_name))", "def createWithPlayers(cls, players, **kwargs):\n game = TicTacToe()\n return cls(game, players, **kwargs)", "def __init__(self, players=None):\n self.game = Game()\n if players:\n self.player1 = players[0]\n self.player2 = players[1]\n else:\n self.player1 = Player('X')\n self.player2 = Player('O')\n self.record = Record()\n self.winning_moves = []", "def __init__(self, players):\n self.players = players\n self.board = Board()", "def __init__(self, players):\n\n self._players = players\n self._current_player = players.get()", "def __init__(self):\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)", "def spawn_players(self) -> None:\n # Initialise the players\n self.player1 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width // 2, 50,\n self.player_lives, self.fps, self.player1_bullet, Direction.DOWN, self.debug)\n self.player2 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width // 2,\n self.screen_height - 50, self.player_lives, self.fps, self.player2_bullet, Direction.UP,\n self.debug)\n\n # Rotate the image of the player at the top\n self.player1.rotate(180)", "def __init__(self, player, board):\n self.player = player\n self.board = board", "def __init__(self, letter: str, player: int = 0) -> None:\n self.letter, self.player = letter, player", "def get_player(self, player_name, player_type):\n\n # Return correct player class\n if player_type == \"human\":\n return Player(player_name)\n if player_type == \"computer\":\n return ComputerPlayer(player_name)", "def clone(self):\n\n # You may be tempted to re-implement using the `copy` module\n # Note that this would require a deepcopy in some cases and there may\n # be significant changes required throughout the library.\n # Consider overriding in special cases only if necessary\n cls = self.__class__\n new_player = cls(**self.init_kwargs)\n new_player.match_attributes = copy.copy(self.match_attributes)\n return new_player", "def __init__(self):\n\n self.name = 'KuhnPoker'\n self.num_players = 2", "def __init__(self, id, first_name, last_name, player_name, age, type):\n AbstractEsportsPlayer._validate_input_integer('ID', id)\n self._id = id\n\n AbstractEsportsPlayer._validate_input_string('First Name', first_name)\n self._first_name = first_name\n\n AbstractEsportsPlayer._validate_input_string('Last Name', last_name)\n self._last_name = last_name\n\n AbstractEsportsPlayer._validate_input_string('Player Name', player_name)\n 
self._player_name = player_name\n\n AbstractEsportsPlayer._validate_input_integer('Age', age)\n self._age = age\n\n AbstractEsportsPlayer._validate_input_string('Type', type)\n self._type = type", "def __init__(self):\n\n self.score = 0\n self.game_over = False\n\n # Create sprite lists\n self.all_sprites_list = pygame.sprite.Group()\n\n # Create the player\n self.player = Player(5, 5)\n self.all_sprites_list.add(self.player)", "def __init__(self, player):\n \n self.colour = player\n self.game_in_head = Board()", "def create_player():\n\n\t#TODO : Ajout d'une BDD des différents joueurs avec des scores et vérifier la présence des joueurs choisis dans cette BDD pour charger les scores\n\n\tactivator = ''\n\tinhibitor = ''\n\n\tprint(\"\\nEntrez le pseudo du joueur\",colors.GREEN + \"'Activator' : \" + colors.STOP, end = \"\")\n\tactivator = input()\n\n\tprint(\"\\nEntrez le pseudo du joueur\", colors.RED + \"'Inhibitor' : \"+colors.STOP, end = \"\")\n\tinhibitor = input()\n\n\t# Default usernames if not defined by users\n\tif len(activator) == 0:\n\t\tactivator = 'Activator'\n\n\tif len(inhibitor) == 0:\n\t\tinhibitor = 'Inhibitor'\n\n\t# Attribute to each player the status he chose\n\tData.current_player['Activator'] = activator\n\tData.current_player['Inhibitor'] = inhibitor\n\n\treturn activator, inhibitor", "def __init__(self, player_id):\n self.player_id = player_id\n self.hand = []\n self.name = [\"Anikó\",\n \"Bori\",\n \"Nagyapa\",\n \"Kinga\",\n \"Jocó\",\n \"Nagyi\",\n \"Éva\",\n \"Robi\",\n \"Józsi\"][player_id]", "def create_player (self, username = None):\n # Get unique username if needed\n if (username == None):\n username = \"default_username\" + str (time.time ())\n self.username = username\n r = requests.post (self.url_endpoint, data = {\"new_player\": self.username})\n if (r.status_code != 201):\n print (\"Failed to create user:\\n\", r.text)\n return r\n play_data = json.loads (r.text)\n self.secret = play_data['player_secret']\n with open (self.filename, \"w\") as f:\n f.write (f\"username {self.username}\\nsecret {self.secret}\")", "def __init__(self, player_id, player_name, chat_id):\n\n # find player info in a database\n with Player.client as client:\n db = client.game_db\n user = db.players.find_one({\"id\": player_id})\n if user is None:\n # add user to database\n user = {\"id\": player_id,\n \"name\": player_name,\n \"chat_id\": chat_id,\n \"games_num\": 0,\n \"rating\": Player.default_rating}\n db.players.insert(user)\n \n self.id = user[\"id\"]\n self.name = user[\"name\"]\n self.chat_id = user[\"chat_id\"]\n self.rating = user[\"rating\"]\n self.game = None\n self.balance = None\n self.stake = None\n self.timer = None\n self.out_of_time = False\n self.searching = False", "def __init__(self, RoomName = \"living\"):\n self.room_name = RoomName\n self.objects = Objects()\n self.character = Player()", "def __init__(self, player, gamestate):\n self.player = player\n self.gamestate = gamestate", "def __init__(self, player):\r\n other_player = \"lower\" if player == \"upper\" else \"upper\"\r\n \r\n #set our sides \r\n self.player_information[\"us\"][\"player_side\"] = player\r\n self.player_information[\"them\"][\"player_side\"] = other_player\r\n\r\n #create our board edge and board representation\r\n self.board_edge = hex_boundary_getter((0,0), 4, [])\r\n self.board_array = generate_board()", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.clock = pg.time.Clock()\n self.fps = 60.0\n self.keys = 
pg.key.get_pressed()\n self.done = False\n self.player = Player((50,875), 4)\n self.level = pg.Surface((1000,1000)).convert()\n self.level_rect = self.level.get_rect()\n self.viewport = self.screen.get_rect(bottom=self.level_rect.bottom)\n self.win_text,self.win_rect = self.make_text()\n self.obstacles = self.make_obstacles()", "def __init__(self, label, player):\n \n self._label = label\n self._player = player.upper()\n self._legal_moves = dict()\n self._potential_moves = list()", "def __init__(self, player_raw: dict, time_per_player_game: int,\n start_side: int, board_size: int, n_players: int):\n self.is_ai = player_raw[\"is_ai\"]\n self.id_ = player_raw[\"id_\"]\n self.name = player_raw[\"name\"]\n self.executable = player_raw[\"executable\"]\n self.color = player_raw[\"color\"]\n self.time_per_game = time_per_player_game\n self.start_side = start_side\n self.board_size = board_size\n self.n_players = n_players\n self.coordinates = self.get_starting_coordinates(\n board_size, start_side)\n\n self.goals: List[Coordinates] = self.get_goal_states(\n self.coordinates, self.board_size)\n\n if self.is_ai and os.path.isfile(self.executable):\n self.proc: Optional[Popen] = self.start_ai(\n self.executable,\n self.n_players,\n self.board_size,\n self.time_per_game,\n self.start_side,\n )", "def make_player(self, page):\r\n player = Player()\r\n face = page.find(\"div\",id=\"info_content\").find_all(\"td\")\r\n player.name = face[0].get_text().strip()\r\n player.club = face[1].get_text().strip()\r\n player.nation = face[2].get_text().strip()\r\n player.league = face[3].get_text().strip()\r\n player.sf = int(face[4].get_text().strip())\r\n player.wf = int(face[5].get_text().strip())\r\n player.ir = int(face[6].get_text().strip())\r\n player.foot = face[7].get_text().strip()\r\n player.height = float(face[8].get_text().split(\"|\")[0].strip(\"cm \"))\r\n player.weight = float(face[9].get_text().strip(\"\"))\r\n player.version = face[10].get_text().strip()\r\n player.def_wr = face[11].get_text().strip()\r\n player.att_wr = face[12].get_text().strip()\r\n player.added_on = datetime.strptime(face[13].get_text().strip()[2:], \"%y-%m-%d\")\r\n player.real_face = face[15].get_text().strip()==\"icon-checkmark text-success\"\r\n player.body_type = face[16].get_text().strip()\r\n player.age = face[17].get_text().strip(\" years old \\n\\r\")\r\n player.rating = self.make_rating([sub for sub in page.find(\"div\",id=\"stats_box\").find(class_=\"stats-inner col-md-12\").find(class_=\"row\").children])\r\n player.href = \"/\"+page.find(id=\"share_player_link\")[\"value\"].strip(\"https://www.futbin.com/\")\r\n player.pid = int(page.find(id=\"page-info\")[\"data-player-resource\"])\r\n return player", "def __init__(self, player_control, players=None):\r\n self.player_control = player_control\r\n self.players = {} # copy for restoration\r\n if players is not None:\r\n for player in players.values():\r\n self.add_player(player)", "def __init__(self):\r\n self.players = {}", "def new(sock, info=None):\n return ProxyPlayer(sock, info)", "def __init__(self, player1, player2):\n self.player1 = Player(player1)\n self.player2 = Player(player2)\n self.die = Die()\n self.turn(self.player1)", "def __init__(self, player1, player2, state_machine, restore = False):\r\n super().__init__()\r\n self.__players[0] = player1\r\n self.__players[1] = player2\r\n self.__player_names[player1] = 'Human'\r\n self.__player_names[player2] = 'Bot'\r\n self.__state_machine = state_machine", "def __init__(self, players):\n\n # Instantiate a 
Players object with the players queue\n self._players = Players(players)\n # Instantiate the Die to be used for the current game\n self._die = Die()\n # Track the game status\n self._active_turn = True\n self._end_game = False", "def create_players_list(self):\n for p in self.players_names:\n self._players_list.append(Player(p))", "def __init__(self):\n player_1 = input('Player 1, Enter your name: ')\n player_2 = input('Player 2, Enter your name: ')\n self.__fields = [Field(), Field()]\n self.__players = [Player(player_1), Player(player_2)]\n self.__current_player = 0\n self.__next_player = 1", "def __init__(self, players):\n\n # Define the players\n self.players = players\n\n # Define who starts the game\n self.nplayer = 1 \n\n # Define the board\n self.board = [0] * 9", "def _new(self, *args):\n if self.game:\n raise ServerException('already playing a game')\n self.game, self.player = self.server.new_game(self)", "def __init__(self):\n self.players = {1: [\"Player_a\", \"\\u25CF\"], 2: [\"Player_b\", \"\\u25CB\"]}\n self.current_player = 1\n self.playing_player = self.players[1]\n self.grid = [[\" \"] * 6 for x in range(7)]", "def __init__(self, player1, player2):\n self.players = [player1, player2]\n self.tokens = {\n ' ': ' ',\n player1: 'X',\n player2: 'O',\n }\n self.score = {\n player1: 0,\n player2: 0,\n }\n self.moves = None\n self.winner = None\n self.turn = ''\n self.reset()", "def __init__(self, name=\"Player\", resources=[0,0,0,0,0,0,0,0], xor_resources=None,\\\n current_hand=None, structures=None, starting_gold=3, discounted_resources=None):\n if structures != None:\n self.structures = structures # by type? Should we have a structure type? \n else:\n self.structures = []\n \n self.name = name\n self.wonders = None \n player.west_natural= False\n player.west_manufactured = False\n player.east_natural= False\n player.east_manufactured= False\n\n if current_hand == None:\n self.current_hand = None\n else:\n self.current_hand = current_hand #I dont know if we need this\n self.starting_gold = starting_gold", "def __init__(self, x, y, rot, plyId):\n # Call the parent class (Sprite) constructor\n super().__init__()\n\n # Define the speed parameters of the players\n self._fbSpeed = 2 # Forward/Backward Speed in pixels/frame\n self._lrSpeed = 1 # Left/Right Speed in pixels/frame\n self._rotSpeed = 2 # In degrees/frame\n\n # Define the initial position of the player\n self._x = x\n self._y = y\n self._rot = rot # 0 degree is facing upward\n self._radius = 10\n\n self._dx = 0\n self._dy = 0\n\n self._plyId = plyId\n\n self._hp = 3", "def create_player(self,player_name, attr = None, team_file = None):\n player_first, player_last = player_name.split(\" \")\n player_file = player_name.replace(\" \", \"\") + '.json'\n if(os.path.exists(self.player_path + player_file)):\n return(False)\n else:\n with open(self.player_path + player_file, 'x') as new_file:\n with open(self.player_template_path, 'r') as template:\n data = json.load(template)\n data['player_name'] = player_first + ' ' + player_last\n json.dump(data, new_file)\n template.close()\n new_file.close()\n\n\n if attr: # If the user inputed new data, add the data, else use template\n try:\n self.update_player_attribute(player_file, attr)\n except:\n os.remove(player_file)\n\n if team_file: #if the user selected a team, add the player to the team\n self.add_team_player(team_file, player_file)\n\n return(True)", "def setUp(self):\n self.player = Player()", "def create_player(player: Player) -> None:\n with engine.connect() as conn:\n\n 
conn.execute(\n player_table.insert().values(\n steamid=player.steamid,\n level=player.level,\n xp=player.xp,\n credits=player.credits,\n )\n )\n\n skills = list(player.skills)\n result = conn.execute(\n skill_table.insert().values([\n {\n 'key': skill.key,\n 'level': skill.level,\n 'steamid': player.steamid,\n }\n for skill in skills\n ])\n )\n\n for id, skill in zip(result.inserted_primary_key, skills):\n skill._db_id = id", "def make_player(x, y):\n global frames, player_down\n images1 = gamebox.load_sprite_sheet(\"Textures/naked_up.png\", 1, 4)\n images2 = gamebox.load_sprite_sheet(\"Textures/naked_down.png\", 1, 4)\n images3 = gamebox.load_sprite_sheet(\"Textures/naked_left.png\", 1, 4)\n images4 = gamebox.load_sprite_sheet(\"Textures/naked_right.png\", 1, 4)\n player = []\n for image in images1:\n player.append(gamebox.from_image(x, y, image))\n for image in images2:\n player.append(gamebox.from_image(x, y, image))\n for image in images3:\n player.append(gamebox.from_image(x, y, image))\n for image in images4:\n player.append(gamebox.from_image(x, y, image))\n frames = 4\n for each in player:\n each.scale_by(.15)\n return player", "def player(self, **kw):\n _append_conds(self._default_cond, types.Player, kw)\n return self", "def __init__(self, player_id, difficulty_level):\n self.player_id = player_id\n self.difficulty_level = difficulty_level\n global our_player\n our_player = player_id", "def __init__(self, players, piles=None):\n self.players = players\n self.piles = piles if (piles != None) else [5, 5, 5, 5]\n self.nplayer = 1 # player 1 starts.", "def __init__(self,\n name : str = \"Player\") -> None:\n self.name = name\n self.gems_possessed = GemsCollection()\n self.cards_possessed = set()\n self.cards_reserved = set()\n self.nobles_possessed = set()", "def __init__(self, player_num):\n self.name = \"Computer player \" + str(player_num)\n self.ward = str(player_num)\n self.budget = 10\n self.happiness = 100", "def __init__(self, noPlayers, noLaps):\n\t\tself.noPlayers = noPlayers\n\t\tself.noLaps = noLaps\n\t\tself.players = []\n\t\tself.laps = dict()\n\t\tfor pNo in range(0, noPlayers-1):\n\t\t\tplayer = Player()\n\t\t\tself.players.append(player)\n\t\t\tself.laps[id(player)] = 0", "def initialize_players():\n return [Player(name, Hand([]), 0) for name in PLAYER_NAMES]", "def __init__(self):\n # Current player\n self.player = X\n\n # Board\n self.board = [\n [None, None, None],\n [None, None, None],\n [None, None, None]\n ]\n\n # Winner\n self.winner = None\n\n # Game over\n self._gameover = False", "def __init__(self, pname, pmax, plist):\n\n #the player has to have... 
\n self.name = pname\n self.max_items = pmax\n self.items = plist", "def __init__(self, player_x_type, player_o_type):\n self.board = Board()\n self.player_x = player_x_type(self.FIRST_PLAYER_MARK)\n self.player_o = player_o_type(self.SECOND_PLAYER_MARK)", "def __init__(self, colour):\n self.colour = colour\n self.name = \"Player\"", "def spawnPlayer( player ):\r\n \r\n room = random.sample(world.maps.World.roomsList, 1)[0]\r\n \r\n # Uncomment below to force spawn in a certain room\r\n room = \"544\"\r\n \r\n player.room = room\r\n world.maps.World.mapGrid[room].players[player.name] = player\r\n player.status = PLAYING\r\n sendToRoomNotPlayer( player, \"{0}{1} appears in a flash!{2}\".format(BLUE, player, WHITE) )\r\n tellWorld( player, None, \"{0} has entered the arena!\".format(player.name) )\r\n \r\n displayRoom(player, player.room)", "def __init__(self, hand, pot, pub, phase, status, player):\n\n self.hand = hand\n self.pot = pot\n self.pub = pub\n self.phase = phase\n self.status = status\n self.player = player", "def __init__(self, game: int) -> None:\n self.game = game\n self.players: List[object] = []", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n player, created = UserPlayer.objects.update_or_create(user=user,\n xp=validated_data.pop('xp'),\n score=validated_data.pop('score'))\n player.save()\n avatar_data = validated_data.pop('avatar')\n for avatar in avatar_data:\n player.avatar.add(avatar)\n player.save()\n return player", "def __init__(\r\n self,\r\n player_count=4,\r\n strategy=[HumanRandom(), HumanRandom(), HumanRandom(), HumanRandom()],\r\n rules=None,\r\n ):\r\n # shuffle the cards\r\n shuffle(self.community_cards)\r\n shuffle(self.chance_cards)\r\n\r\n self.player_positions = [0] * player_count\r\n self.current_player = randint(0, player_count - 1)\r\n self.player_list = []\r\n for i in range(player_count):\r\n self.player_list.append(\r\n Player(uid=i, token=self.token[i], strategy=strategy[i])\r\n )\r\n self.full_turn_count = 1", "def __init__(self,player):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.c = config.Config()\n\n\t\tself.image = pygame.image.load(self.c.IMAGE_PATH + \"bomb.png\").convert()\n\t\tself.position = self.image.get_rect()\n\t\tself.position = self.position.move((player.position.x,player.position.y))\n\t\tself.range = player.power\n\t\tself.player = player\n\t\tself.triggered = False", "def __init__(self, player_id = 0):\n all_players = ['adiumy', 'amanda', 'beastie', 'emule', 'gavroche', 'hexley', 'kiki', 'konqi', 'nolok', 'pidgin', 'puffy', 'sara_the_racer', 'sara_the_wizard', 'suzanne', 'tux', 'wilber', 'xue']\n self.kart = all_players[np.random.choice(len(all_players))]", "def __init__(self, players, num_of_players):\r\n self.players = players\r\n self.num_of_players = num_of_players\r\n self.active_players = num_of_players\r\n self.dealer = Dealer()\r\n self.card_stack = CardStack()\r\n self.money_stack = MoneyStack()\r\n self.cur_player = 0\r\n self.round_num = 0\r\n self.round_player_money = 0", "def __init__(self,player1: Player = ManualPlayer(\"P1\"),\\\r\n player2: Player = ManualPlayer(\"P2\")):\r\n\r\n self.board = np.zeros((BOARD_SIZE,BOARD_SIZE)\\\r\n ,dtype=np.int8)\r\n self.board[3,3] = '2'\r\n self.board[4,4] = '2'\r\n self.board[3,4] = '1'\r\n self.board[4,3] = '1' \r\n\r\n self.players = []\r\n self.players.append(player1)\r\n self.players.append(player2)\r\n self.turn = 1\r\n self.count = 0", "def __init__(self, 
player):\n self.player = player\n player.career.seasons.append(self)\n self.team = player.team\n self.league = self.team.league\n self.year = self.team.cosmos.year", "def __init__(self, players, board_size, board=None, timeout=10):\n if not 2 <= len(players) <= 4:\n raise ValueError(\"Invalid number of players provided.\")\n\n if board is None:\n board = Board(*board_size)\n\n self.__check_board_is_valid(board, players)\n\n self.board = board\n \n self.__set_colors(players)\n\n self.players = {p.get_color(): p for p in players}\n\n self.state = State([BoardPlayer(p.get_color()) for p in players], board)\n self.violators = []\n self.timeout = timeout", "def __init__(self, player1AI = False, player2AI = False):\n\t\tself.tick = 0\n\t\tself.player1AI = player1AI\n\t\tself.player2AI = player2AI\n\t\tself.selectionIndex = [0, 0]\n\t\tself.colours = [\"#ff6363\", \"#ffc163\", \"#88de68\", \"#63c6ff\", \"#ffffff\", \"#000000\"]\n\t\tself.playerReady = [False, False]\n\t\tself.playerKeys = {0: [\"W\", \"S\", \"SPACE\"], 1: [\"⭡\", \"⭣\", \"ENTER\"]}\n\t\tself.timeSinceReady = 0\n\t\tself.headings = [\n\t\t\t\"Player 1:\" if not self.player1AI else \"Computer:\",\n\t\t\t\"Player 2:\" if not self.player2AI else \"Computer:\"\n\t\t]\n\t\tself.itemSpacing = 0.15", "def get_player(self,p):\n self._validate(p)\n return p.player()", "def __init__(self, *names):\n deck = Deck.create(shuffle=True)\n self.names = (list(names) + \"P1 P2 P3 P4 \".split())[:4]\n self.hands = {\n n: Player(n, h) for n, h in zip(self.names, deck.deal(4))\n }", "def from_player(cls, player: dict):\n title = player[\"videoDetails\"][\"title\"]\n videoId = player[\"videoDetails\"][\"videoId\"]\n duration = float(player[\"videoDetails\"][\"lengthSeconds\"])\n print(player[\"videoDetails\"][\"thumbnail\"][\"thumbnails\"])\n thumbs = sorted(player[\"videoDetails\"][\"thumbnail\"][\"thumbnails\"], key=lambda k: int(k['width']),\n reverse=True)\n name_channel = player[\"videoDetails\"][\"author\"]\n url_channel = f\"https://youtube.com/channel/{player['videoDetails']['channelId']}\"\n shortDescription = player[\"videoDetails\"][\"shortDescription\"]\n cls(title=title, videoId=videoId, duration=duration, thumbnails=thumbs,\n channel={\"name\": name_channel, \"url\": url_channel},\n description=shortDescription)", "def __init__(self, player_1):\n self.die = Die()\n self.player_1 = player_1\n self.current_player = self.player_1\n self.turns = 1", "def set_player(self, char_data):\n self.player = self.server.object_manager.add_player(char_data)", "def build(player_name):\n\n data = Gamebuilder.load_game(player_name)\n\n if not data:\n data = MazeFactory.build()\n\n hero_data = {\n \"name\": player_name,\n \"description\": \"the hero of dork!\",\n \"location\": \"Entrance\",\n \"inventory\": {},\n \"equipped\": []\n }\n\n data[\"rooms\"][\"room 0\"][\"players\"][player_name] = hero_data\n\n game = Gamebuilder._instantiate(Game, **data)\n setattr(game, \"maze\", data[\"maze\"])\n setattr(\n game, \"rooms\", Gamebuilder._make_rooms(\n deepcopy(data[\"rooms\"])\n )\n )\n\n Gamebuilder._place_players(game)\n Gamebuilder._make_paths(game)\n\n Gamebuilder._get_adj_description(game)\n Gamebuilder._get_room_inv_description(game)\n\n for player in Player.instances:\n if player.name == player_name:\n hero = player\n\n game.hero = hero\n game.maze[hero.location.x][hero.location.y] = MazeFactory.player_color\n return game", "def start(self, player: Player) -> Game:\n\n board_payload = dict(rows=self.rows, cols=self.cols)\n initial_slots = 
self._get_initial_slots(**board_payload)\n board_db = self.repo.boards.add(\n {**board_payload, \"slots\": initial_slots, \"mines\": 0}\n )\n board = Board.from_orm(board_db)\n\n board.set_mines(mines=self.mines)\n board_db = self.repo.boards.update(board_db, board)\n\n game_payload = dict(\n player_id=player.id,\n board_id=board.id,\n status=GameStatusEnum.ongoing,\n start_time=datetime.utcnow(),\n )\n game_db = self.repo.games.add(game_payload)\n game = Game.from_orm(game_db)\n return game", "def set_player(self, new_player):\n self.player = new_player", "def __init__(self, player):\n self._piece_type = 'pawn'\n self._value = 2 if player == \"white\" else -2\n self._summary = 'W-Pw' if player == \"white\" else 'B-Pw'\n\n self._directions = []\n if player == \"white\":\n self._directions.append([(-1, 1)])\n self._directions.append([(0, 1), (0, 2)])\n self._directions.append([(1, 1)])\n else:\n self._directions.append([(-1, -1)])\n self._directions.append([(0, -1), (0, -2)])\n self._directions.append([(1, -1)])" ]
[ "0.8133294", "0.73655224", "0.7338226", "0.7169045", "0.7107056", "0.71011436", "0.7019268", "0.70154893", "0.7000745", "0.6951531", "0.6851873", "0.68201184", "0.6619382", "0.6601898", "0.6583949", "0.65359473", "0.65358025", "0.6531014", "0.6509729", "0.6432442", "0.639431", "0.6384338", "0.6380783", "0.6352182", "0.6346931", "0.6345022", "0.6334281", "0.6331245", "0.63193166", "0.63183755", "0.63050205", "0.62954515", "0.62884116", "0.62843657", "0.6266335", "0.6258372", "0.6255088", "0.62519485", "0.6251419", "0.6236972", "0.62302816", "0.6226051", "0.61988246", "0.6194732", "0.6169049", "0.61588806", "0.61413133", "0.6140967", "0.61400944", "0.61333555", "0.6110486", "0.61085", "0.60893023", "0.60802895", "0.60620546", "0.6058266", "0.60281694", "0.60187894", "0.6012773", "0.5987974", "0.59713596", "0.5970546", "0.596462", "0.5957881", "0.5949568", "0.5939807", "0.59351623", "0.5927", "0.5904668", "0.5893068", "0.5891962", "0.58664215", "0.5863508", "0.5862113", "0.5845753", "0.5837241", "0.58220935", "0.5818197", "0.5816892", "0.58151907", "0.580563", "0.5805235", "0.5801938", "0.5794945", "0.57919496", "0.57917655", "0.5782946", "0.5777469", "0.5768787", "0.5757127", "0.57246417", "0.5722708", "0.57205", "0.57170814", "0.57147723", "0.5712282", "0.57085776", "0.57073516", "0.57053447", "0.57021564", "0.5687415" ]
0.0
-1
Load all quest handlers here
def load_quests(self): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_handlers(self):\n\t\tself.handlers = []\n\t\tfor mod in os.listdir('classes/handlers'):\n\t\t\tif mod == '__init__.py' or mod[-3:] != '.py':\n\t\t\t\tcontinue\n\t\t\tlib = __import__(mod[:-3], locals(), globals())\n\t\t\tself.handlers.append(lib)\n\t\t#\n\t\tself.handlers.sort(key=lambda x: x.order, reverse=False)\n\t\tprint(\"Loaded handlers: \", ', '.join([x.tag for x in self.handlers]) )\n\t\tassert len(self.handlers)>0", "def _handler_init(self):\r\n\t\tself._handlers[\"player-join\"] = FunctionDelegate()\r\n\t\tself._handlers[\"player-quit\"] = FunctionDelegate()\r\n\t\tself._handlers[\"game-start\"] = FunctionDelegate()\r\n\t\tself._handlers[\"game-stop\"] = FunctionDelegate()", "def __luanch_handlers(self):\n\n self.__updater = Updater(self.__token, use_context=True)\n self.__dp = self.__updater.dispatcher\n # on different commands - answer in Telegram\n self.__dp.add_handler(CommandHandler(\"start\", self.start_message))\n self.__dp.add_handler(CommandHandler(\"help\", self.help))\n self.__dp.add_handler(CommandHandler(\"history\", self.history))\n self.__dp.add_handler(CommandHandler(\"request\", self.request))\n self.__dp.add_handler(CommandHandler(\"cancel\", self.cancel))\n self.__dp.add_handler(CommandHandler(\"show\", self.show))\n self.__dp.add_handler(CommandHandler(\"promote\", self.promote))\n self.__dp.add_handler(CommandHandler(\"demote\", self.demote))\n self.__dp.add_handler(CommandHandler(\"checkadmin\", self.check_admin))\n self.__dp.add_handler(CommandHandler(\"kick\", self.kick))\n self.__dp.add_handler(CommandHandler(\"stop\", self.stop_all))\n self.__dp.add_handler(CommandHandler(\"whatsmyid\", self.__whatsmyid))\n self.__updater.start_polling()", "def loadTreeHandlers(self):\n #\n # Paths for key folders\n plugin_path = g.os_path_join(g.app.loadDir, \"..\", \"plugins\")\n self.handler_path = handler_path = g.os_path_join(g.app.loadDir, \"..\", \"plugins\", \"trees\")\n #\n if not g.os_path_isdir(handler_path):\n g.es(\"No tree handler folder found\", color=\"red\")\n else:\n g.es(\"Scanning for tree handlers\", color=\"blue\")\n #\n # Add folder locations to path\n old_path = sys.path[:]\n sys.path.insert(0, plugin_path)\n sys.path.insert(0, handler_path)\n #@+<< Get plugin manager module >>\n #@+node:ekr.20050329082101.135: *4* << Get plugin manager module >>\n # Get the manager\n try:\n self.plugin_manager = __import__(\"plugin_manager\")\n except ImportError as err:\n g.es(\"Autotrees did not load plugin manager: %s\" % (err,), color=\"red\")\n self.plugin_manager = None\n #@-<< Get plugin manager module >>\n #@+<< Find all handlers >>\n #@+node:ekr.20050329082101.136: *4* << Find all handlers >>\n # Find all handlers\n for filename in glob.glob(g.os_path_join(handler_path, \"*.py\")):\n handler_name = g.os_path_splitext(g.os_path_split(filename)[1])[0]\n g.es(\"... looking in %s\" % handler_name, color=\"blue\")\n try:\n self.loadHandlersFrom(handler_name)\n except BadHandler as err:\n g.es(\"... unable to load '%s' handler: %s\" % (handler_name, err), color=\"red\")\n #@-<< Find all handlers >>\n # Restore\n sys.path = old_path", "def handle_loadall(bot, ievent):\n plugs.loadall(plugin_packages, force=True)\n ievent.done()", "def load_commands():\n register_plugin(configure_client_details)\n register_plugin(search_venues)", "def import_all_handlers(self):\n import os\n exclude_list=[\"base\"]\n\n #\n # the list of handlers (excluding base. 
Add more you dont want\n # to be loaded or inspected to exclude_list above.)\n #\n mods=[]\n module_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'handlers'))\n #print(\"importing handlers from: \" + module_path)\n for mod in os.listdir( module_path ):\n mod = mod.split(\".\")[0]\n if not mod.startswith(\"_\") and not mod in exclude_list:\n #print(\" now processing: \" + str(mod))\n mods.append(mod)\n \n #print(\"mods: \" + str(mods))\n class_list = []\n # load all the models from their modules (mods)\n #print(str(mods))\n import importlib\n for m in mods:\n #print(\"importing: \" + 'pow_comments.handlers.' + m) \n try:\n mod = importlib.import_module('pow_comments.handlers.' + m)\n except:\n pass\n #print(dir(mod))", "def load(self):\n\n self.commands = {\n # Usual text commands (e.g. \"/echo 123\")\n 'user': {},\n 'owner': {\n 'load': self.load,\n 'modprobe': self.modprobe,\n 'rmmod': self.rmmod\n },\n # Modules for bot's reaction to a different message types\n 'text': {},\n 'photo': {},\n 'audio': {},\n 'video': {},\n 'sticker': {},\n 'voice': {}\n }\n\n for file in os.listdir('modules'):\n if file.endswith('.py'):\n command_type, command = file.split('_', 1)\n self.modprobe(self, command[:-3])", "def _register_handlers(self):\n DBG(\"\\nregister handlers\")\n for hook, handler in self.handlers:\n g.registerHandler(hook, handler)\n\n signal_manager.connect(self.c, 'body_changed', self._after_body_key)", "def loadAllCommand(self, player):\n for eachCmd in self.commands.keys():\n player.addCommand(eachCmd, self.commands[eachCmd]())", "def init_bot(self):\n dispatcher = self.updater.dispatcher\n\n dispatcher.add_handler(CommandHandler(\"start\", self.on_bot_start))\n dispatcher.add_handler(CommandHandler(\"help\", self.on_bot_help))\n dispatcher.add_handler(CommandHandler(\"about\", self.on_bot_about))\n dispatcher.add_handler(CommandHandler(\"vreausaajut\", self.on_bot_offer_to_help))\n dispatcher.add_handler(CommandHandler(\"status\", self.on_status))\n dispatcher.add_handler(CommandHandler(\"Da\", self.on_accept))\n dispatcher.add_handler(CommandHandler(\"Nu\", self.on_reject))\n\n dispatcher.add_handler(CallbackQueryHandler(self.negotiate_time, pattern=\"^eta.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_dispatch, pattern=\"^caution.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_handle, pattern=\"^handle.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_wellbeing, pattern=\"^state.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_symptom, pattern=\"^symptom.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_wouldyou, pattern=\"^wouldyou.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_further, pattern=\"^further.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_activities, pattern=\"^assist.*\"))\n\n dispatcher.add_handler(MessageHandler(Filters.photo, self.on_photo))\n dispatcher.add_handler(MessageHandler(Filters.contact, self.on_contact))\n dispatcher.add_handler(MessageHandler(Filters.text, self.on_text_message))\n dispatcher.add_error_handler(self.on_bot_error)", "def load():\r\n global database\r\n global ranks\r\n \r\n osPlatform = (\"Windows\" if os.name == \"nt\" else \"Linux\" if os.name == \"posix\" else os.name)\r\n debug.write('Log file started at %s' % time.strftime(\"%A %d %B %Y - %H:%M:%S\"), 0, False)\r\n debug.write('\\n*******************************************************', 0, False)\r\n debug.write('[SourceRPG]: Turning your Server 
into a Role Playing Game', 0, False)\r\n debug.write('[SourceRPG]: Current Version - %s' % info.version, 0, False)\r\n debug.write('[SourceRPG]: Made by %s' % info.author, 0, False)\r\n debug.write('\\nSystem Info:', 0, False)\r\n debug.write('\\tOS: %s' % osPlatform, 0, False)\r\n debug.write('\\tEventscripts Version: %s' % es.ServerVar('eventscripts_ver'), 0, False)\r\n debug.write('\\tCorelib Version: %s' % es.ServerVar('es_corelib_ver'), 0, False)\r\n debug.write('\\tEventscript Tools Version: %s' % es.ServerVar('est_version'), 0, False)\r\n debug.write('\\tEventscripts Noisy: %s' % es.ServerVar('eventscripts_noisy'), 0, False)\r\n debug.write('\\tPopuplib version: %s' % popuplib.info.version, 0, False) \r\n \r\n cmdlib.registerSayCommand(\"rpgmenu\", sayCommands.mainMenu, \"Opens the rpg main menu\")\r\n cmdlib.registerSayCommand(\"rpgupgrade\", sayCommands.upgradeMenu, \"Opens the upgrade menu\")\r\n cmdlib.registerSayCommand(\"rpgsell\", sayCommands.sellMenu, \"Opens the sell menu\")\r\n cmdlib.registerSayCommand(\"rpghelp\", sayCommands.helpMenu, \"Opens the help menu\")\r\n cmdlib.registerSayCommand(\"rpgstats\", sayCommands.stats, \"Opens the stats menu for the user or another player\")\r\n cmdlib.registerSayCommand(\"rpgrank\", sayCommands.rank, \"Tells the player their rank or another player's rank\")\r\n cmdlib.registerSayCommand(\"rpgpopup\", sayCommands.togglePopup, \"Tells the player their rank or another player's rank\")\r\n cmdlib.registerSayCommand(\"rpgtop10\", sayCommands.top10, \"Sends the player the last updated top 10 scores\")\r\n \r\n es.server.cmd(\"exec sourcerpg/skill_loader.cfg\")\r\n \r\n es.server.cmd(\"exec sourcerpg/addon_loader.cfg\")\r\n \r\n skillConfig.write(True)\r\n skillConfig.execute(True, True)\r\n\r\n debug.write('[SourceRPG] Starting the popup creation', 0, False)\r\n\r\n \"\"\" Create the default popups which aren't unique to players \"\"\"\r\n rpgmenu = popuplib.easymenu(\"sourcerpg_rpgmenu\", \"_popup_choice\", popups.rpgmenu)\r\n rpgmenu.settitle(\"=== %s Menu ===\" % prefix)\r\n rpgmenu.addoption(1, \"Upgrade Skills\")\r\n rpgmenu.addoption(2, \"Sell Skills\")\r\n rpgmenu.addoption(3, \"RPG Help\")\r\n rpgmenu.addoption(4, \"RPG Stats\")\r\n rpgmenu.addoption(5, \"Reset Skills\")\r\n \r\n helpMenu = popuplib.easymenu('sourcerpg_help', '_popup_choice', popups.helpmenu)\r\n helpMenu.settitle('=== %s Help ===' % prefix)\r\n helpMenu.addoption(1, 'About SourceRPG')\r\n helpMenu.addoption(2, 'List of Commands')\r\n helpMenu.addoption(3, 'About SourceRPG Skills')\r\n helpMenu.addoption(4, 'Credit')\r\n helpMenu.submenu(10, \"sourcerpg_rpgmenu\")\r\n \r\n confirmation = popuplib.easymenu('sourcerpg_confirm', '_popup_choice', popups.confirm)\r\n confirmation.settitle(\"=== %s Reset Stats ===\" % prefix)\r\n confirmation.setdescription(\"\"\"Are you sure you want to remove\r\nyour skills? There is no chance\r\nor recovering them again!\"\"\")\r\n confirmation.addoption(True, \"Yes\")\r\n confirmation.addoption(False, \"No\")\r\n \r\n about = popuplib.create('sourcerpg_about')\r\n about.addline('=== About %s ===' % prefix)\r\n about.addline('-' * 30)\r\n about.addline('SourceRPG is a python coded mod')\r\n about.addline('for EventScripts 2+. It enables')\r\n about.addline('players to gain Levels, by gaining')\r\n about.addline('XP from certain events, such as')\r\n about.addline('planting the bomb, or killing')\r\n about.addline('another player. 
Each level gives')\r\n about.addline('%s Credits, which allows you to' % creditsReceived)\r\n about.addline('buy certain skills which aid you')\r\n about.addline('in killing other players.')\r\n about.addline('-' * 30)\r\n about.addline('->8. Back')\r\n about.addline('0. Cancel')\r\n about.submenu(8, 'sourcerpg_help')\r\n \r\n commandspopup = popuplib.create('sourcerpg_commands')\r\n commandspopup.addline(\"=== %s Commands ===\" % prefix)\r\n commandspopup.addline(\"-\" * 30)\r\n commandspopup.addline(\"rpghelp - displays the help menu\")\r\n commandspopup.addline(\"rpgmenu - displays the main menu\")\r\n commandspopup.addline(\"rpgrank - displays your RPG rank\")\r\n commandspopup.addline(\"rpgpopup - toggles on / off automatic popup display\")\r\n commandspopup.addline(\"rpgupgrade - upgrade skills\")\r\n commandspopup.addline(\"rpgsell - sell skills\")\r\n commandspopup.addline(\"rpgstats - display your stats\")\r\n commandspopup.addline(\"-\" * 30)\r\n commandspopup.addline(\"->8. Back\")\r\n commandspopup.addline(\"0. Cancel\")\r\n commandspopup.submenu(8, 'sourcerpg_help')\r\n \r\n creditmenu = popuplib.create('sourcerpg_creditmenu') \r\n creditmenu.addline('=== %s Credits ===' % prefix)\r\n creditmenu.addline('-' * 30)\r\n creditmenu.addline(info.author)\r\n creditmenu.addline(' Script Creator')\r\n creditmenu.addline(' ')\r\n creditmenu.addline('SumGuy14 and Murphey')\r\n creditmenu.addline(' Letting me use their Long Jump code')\r\n creditmenu.addline(' ')\r\n creditmenu.addline('SuperDave')\r\n creditmenu.addline(' He turned my failing SmogNade code into')\r\n creditmenu.addline(' a working code! Thank him for that skill.')\r\n creditmenu.addline(' ')\r\n creditmenu.addline('JoeyT2008 (Jordan Thomas)')\r\n creditmenu.addline(' Awesome scripter who made the database conversion')\r\n creditmenu.addline(' ')\r\n creditmenu.addline('EventScripts Community')\r\n creditmenu.addline(' Help and support, and such a good plugin.')\r\n creditmenu.addline('-' * 30)\r\n creditmenu.addline('8. Back')\r\n creditmenu.addline('0. Cancel')\r\n creditmenu.submenu(8, 'sourcerpg_help')\r\n \r\n debug.write('[SourceRPG] Popups created', 0, False)\r\n \r\n \r\n if int(turboMode):\r\n database = DATABASE_STORAGE_METHOD(\":memory:\")\r\n else:\r\n database = DATABASE_STORAGE_METHOD(databasePath)\r\n \r\n ranks = RankManager()\r\n \r\n \"\"\" If the script is loaded late then make sure all players are inserted \"\"\"\r\n if es.getplayercount():\r\n for player in es.getUseridList():\r\n players.addPlayer( player )\r\n \r\n es.server.queuecmd('mp_restartgame 1')\r\n\r\n if str( es.ServerVar('eventscripts_currentmap')):\r\n es_map_start({})\r\n\r\n \"\"\" If we want to save by intervals then create a repeat to save the database \"\"\"\r\n if str( saveType ) == \"intervals\":\r\n gamethread.delayedname(float(saveLength), 'sourcerpg_databasesave', saveDatabase)\r\n \r\n debug.write('[SourceRPG]: Finished Loading... 
Enjoy your stay!', 0, False)\r\n debug.write('*******************************************************\\n', 0, False)", "def makeHandlers(self):\n\n yield self.loadGrids.start(funcSelf=self)\n yield self.updateClientWatchedGrids.start(funcSelf=self)\n logger.debug(\"RPCs started\")", "def setupInputEventHandlers(self):\n\n default.Script.setupInputEventHandlers(self)\n self.inputEventHandlers.update(\n self.structuralNavigation.inputEventHandlers)\n\n self.inputEventHandlers[\"sayAllHandler\"] = \\\n input_event.InputEventHandler(\n Script.sayAll,\n cmdnames.SAY_ALL)\n\n self.inputEventHandlers[\"panBrailleLeftHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleLeft,\n cmdnames.PAN_BRAILLE_LEFT,\n False) # Do not enable learn mode for this action\n\n self.inputEventHandlers[\"panBrailleRightHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleRight,\n cmdnames.PAN_BRAILLE_RIGHT,\n False) # Do not enable learn mode for this action", "def loadPlayerCommands(self, player):\n player.addCommand('get', self.commands['get']())\n player.addCommand('drop', self.commands['drop']())\n player.addCommand('go', self.commands['go']())\n player.addCommand('say', self.commands['say']())\n player.addCommand('look', self.commands['look']())\n player.addCommand('quit', self.commands['quit']())\n player.addCommand('commands', self.commands['commands']())\n player.addCommand('color', self.commands['color']())", "def loadallskills(self):\r\n for skill in os.listdir( os.path.join( es.getAddonPath( info.basename ), \"skills\" )):\r\n es.load(\"%s/skills/%s\" % (info.basename, skill))", "def loadHandlersFrom(self, name):\n try:\n module = __import__(name)\n except Exception as err:\n raise BadHandler(\"Failed import: %s\" % err)\n #\n # Look for handler classes\n for cls_name in dir(module):\n object = getattr(module, cls_name)\n try:\n is_handler = issubclass(object, BaseTreeHandler)\n except TypeError:\n is_handler = False\n if is_handler:\n g.es(\"... 
found handler '%s'\" % (cls_name,), color=\"blue\")\n self.handlers[cls_name.lower()] = object", "def u2handlers(self):\n return []", "def __setupCommandHandlerTypes(self):\n # dict saving all command handler types\n self.__commandHandlers = {'channel': {}, 'query': {}, 'not_authed_dcc': {}, 'authed_dcc': {}}", "def load_data(self):\n super(MudderyNPC, self).load_data()\n \n data = self.get_data_record()\n if not data:\n return\n\n # set NPC's default dialogues.\n self.set_dialogue(data.dialogue)", "def load_all_resources():\n\n # Load the fonts\n ResourcesManager._load_font(\"Munro.ttf\")\n\n # Load images\n ResourcesManager.HIBER_NATION_IMG = ResourcesManager._load_image(\"hiber_nation.png\")\n ResourcesManager.SHIP_IMG = ResourcesManager._load_image(\"ship.png\")\n ResourcesManager.MISSILE_IMG = ResourcesManager._load_image(\"missile.png\")\n\n # Load sounds\n # ResourcesManager.MENU_MUSIC = ResourcesManager._load_sound(\"menu.ogg\")", "def get_handlers_for_event(self, event):\n pass # pragma: no cover", "def setup(cls):\n super().setup()\n cls.default_dialogues = cast(\n DefaultDialogues, cls._skill.skill_context.default_dialogues\n )\n cls.tac_dialogues = cast(TacDialogues, cls._skill.skill_context.tac_dialogues)\n cls.oef_search_dialogues = cast(\n OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues\n )", "def load_shutit_modules(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif self.loglevel <= logging.DEBUG:\n\t\t\tself.log('ShutIt module paths now: ',level=logging.DEBUG)\n\t\t\tself.log(self.host['shutit_module_path'],level=logging.DEBUG)\n\t\tfor shutit_module_path in self.host['shutit_module_path']:\n\t\t\tself.load_all_from_path(shutit_module_path)", "def quests(self, quests):\n\n self._quests = quests", "def register_handlers(path = EXPLOIT_FOLDER):\n\n exploit_folder = './{}/{}'.format(os.path.dirname(__file__), path)\n handlers = []\n\n for module in os.listdir(exploit_folder):\n\n if not module.endswith(\".py\") or module == \"__init__.py\":\n continue\n\n # Execute the script\n # We assume that each executed script registers himself to the handlers dictionary.\n try:\n execfile('./{}/{}'.format(path, module))\n except Exception as e:\n log.failure(\"Could not register handler '{}' : {}\".format(module, e))\n\n log.info(\"Registered {} handler(s).\".format(len(handlers)))\n for handler in handlers:\n\n handler_name = handler.__name__\n log.info(\"- Registered '{}' handler\".format(handler_name))\n\n return handlers", "def get_handlers(self):\n raise NotImplementedError()", "def _handlers(self):\n settings = self.get_settings(prefix='tangled.app.handler.')\n # System handler chain\n handlers = [settings['exc']]\n if self.has_any('static_directory'):\n # Only enable static file handler if there's at least one\n # local static directory registered.\n dirs = self.get_all('static_directory')\n if any(isinstance(d, LocalDirectory) for d in dirs):\n handlers.append(settings['static_files'])\n handlers.append(settings['tweaker'])\n handlers.append(settings['notifier'])\n handlers.append(settings['resource_finder'])\n if self.get_setting('csrf.enabled'):\n handlers.append(settings['csrf'])\n if 'auth' in settings:\n handlers.append(settings['auth'])\n # Handlers added by extensions and applications\n handlers += self.get_all(abcs.AHandler, [])\n if self.get_setting('cors.enabled'):\n handlers.append(settings['cors'])\n # Main handler\n handlers.append(settings['main'])\n # Wrap handlers\n wrapped_handlers = []\n next_handler = None\n for handler in 
reversed(handlers):\n handler = HandlerWrapper(handler, next_handler)\n wrapped_handlers.append(handler)\n next_handler = handler\n wrapped_handlers.reverse()\n return wrapped_handlers", "def request_plugins(self):", "def _register_handlers(self):\n self.jm.register_handler(\"move_node\", self.move_node)\n self.jm.register_handler(\"copy_node\", self.copy_node)\n self.jm.register_handler(\"push_to_vospace\", self.push_to_vospace)\n self.jm.register_handler(\"push_from_vospace\", self.push_from_vospace)\n self.jm.register_handler(\"pull_to_vospace\", self.pull_to_vospace)\n self.jm.register_handler(\"pull_from_vospace\", self.pull_from_vospace)", "def post_load(self):\r\n for _, effect in self._effects.items():\r\n effect.post_load()", "def addHandlers(self, handlers):\n self._eventHandlers.update(handlers)\n keys = self._eventHandlers.keys()\n pygame.event.set_allowed(keys)", "def _register_handlers(self):\n import handlers as th\n import inspect\n for name, class_type in inspect.getmembers(th, predicate=inspect.isclass):\n if class_type is th.ZMQTopicHandlerBase:\n continue\n handler = class_type()\n topic = handler.get_topic()\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]", "def on_load(self):\n pass", "def on_load(self):\n pass", "def ready(self):\n logger.info('game.ready')\n import game.signals", "async def HalflingMasterCheerleaders(self, ctx):\n\n data = getattr(special_play, inspect.currentframe().f_code.co_name)()\n await self.send_embed(data, ctx)", "async def load_all_extensions(self):\n await self.wait_until_ready()\n await asyncio.sleep(1)\n\n cogs = [\"cogs.member\",\n \"cogs.officer\",\n \"cogs.rolemanager\",\n \"cogs.database\",\n \"cogs.everyone\",\n \"cogs.nodewar\",\n \"cogs.twitch\"]\n\n for extension in cogs:\n try:\n self.load_extension(extension)\n print(f'loaded {extension}')\n except Exception as e:\n error = f'{extension}\\n {type(e).__name__} : {e}'\n print(f'failed to load extension {error}')\n print('-' * 10)\n\n for guild in self.guilds:\n if not discord.utils.get(guild.roles, name=self.manager_role):\n await self.create_bot_manager(guild)\n\n print(f\"\\nUsername: {self.user}\\nID: {self.user.id}\")", "def __init__(self):\n self._order_handlers = []\n self._target_handlers = {}\n\n self._robot = None\n self._lock = threading.Lock()", "def main():\n\n populate('monsters')\n populate('spells')\n populate('conditions')\n populate('encounters')\n populate('characters')\n populate('actions')", "def _setup_collision_handlers(self):\n self._world.add_collision_handler(\"player\", \"item\", on_begin=self._handle_player_collide_item)\n self._world.add_collision_handler(\"player\", \"block\", on_begin=self._handle_player_collide_block,\n on_separate=self._handle_player_separate_block)\n self._world.add_collision_handler(\"player\", \"mob\", on_begin=self._handle_player_collide_mob)\n self._world.add_collision_handler(\"mob\", \"block\", on_begin=self._handle_mob_collide_block)\n self._world.add_collision_handler(\"mob\", \"mob\", on_begin=self._handle_mob_collide_mob)\n self._world.add_collision_handler(\"mob\", \"item\", on_begin=self._handle_mob_collide_item)", "def _load(self):\n\t\tpool = []\n\t\tview = []\n\t\tlibrary = []\n\n\t\tif is_file(\"~/comiccrawler/pool.json\"):\n\t\t\tpool = json.loads(content_read(\"~/comiccrawler/pool.json\"))\n\n\t\tif is_file(\"~/comiccrawler/view.json\"):\n\t\t\tview = json.loads(content_read(\"~/comiccrawler/view.json\"))\n\n\t\tif 
is_file(\"~/comiccrawler/library.json\"):\n\t\t\tlibrary = json.loads(content_read(\"~/comiccrawler/library.json\"))\n\n\t\tfor m_data in pool:\n\t\t\t# reset state\n\t\t\tif m_data[\"state\"] in (\"DOWNLOADING\", \"ANALYZING\"):\n\t\t\t\tm_data[\"state\"] = \"ERROR\"\n\t\t\t# build episodes\n\t\t\tepisodes = []\n\t\t\tfor ep_data in m_data[\"episodes\"]:\n\t\t\t\tepisodes.append(Episode(**ep_data))\n\t\t\tm_data[\"episodes\"] = episodes\n\t\t\tmission = Mission(**m_data)\n\t\t\tself._add(mission)\n\n\t\tfor url in view:\n\t\t\tself.view[url] = self.pool[url]\n\n\t\tfor url in library:\n\t\t\tself.library[url] = self.pool[url]\n\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", self.view)\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", self.library)", "def plugins_ready():\n\n for plugin in registerorder:\n plugin.ready()", "def event_loop(self):\n for event in pg.event.get():\n self.keys = pg.key.get_pressed()\n if event.type == pg.QUIT or self.keys[pg.K_ESCAPE]:\n self.done = True\n self.cannon.get_event(event, self.objects)", "def create_hooks(self, hclass):\n for extension in self.db.values():\n self.events.register_callbacks_from_inst(hclass, extension)", "def cacheHandlers(self):\n\n def collect_handlers(module):\n\n def wanted(member):\n return (isclass(member) and\n issubclass(member, handlers.HandlerBase) and\n member.__name__.endswith('Handler'))\n\n m = {}\n for name, obj in getmembers(module, wanted):\n m[name] = obj(self.skype)\n m[name].init()\n return m\n\n self.handlers = collect_handlers(handlers)\n if custom_handlers:\n self.handlers.update(collect_handlers(custom_handlers))", "def load(self):\n\n super().load()\n self.check_dcss()\n self.check_discord()", "def add_handler(self, handler):\n pass", "def on_load(self):", "def main(self):\n\n dp = self.dispatcher\n\n dp.add_handler(MessageHandler(Filters.text, self.__msg_handler))\n dp.add_handler(MessageHandler(Filters.command, self.__msg_handler))\n dp.add_handler(MessageHandler(Filters.photo, self.__photo_handler))\n dp.add_handler(MessageHandler(\n Filters.location, self.__location_handler))\n dp.add_handler(CallbackQueryHandler(callback=self.__clb_handler))\n\n self.updater.start_polling()\n self.updater.idle()", "def setup(cls):\n super().setup()\n cls.http_handler = cast(\n HttpHandler, cls._skill.skill_context.handlers.http_handler\n )\n cls.logger = cls._skill.skill_context.logger\n\n cls.http_dialogues = cast(\n HttpDialogues, cls._skill.skill_context.http_dialogues\n )\n\n cls.get_method = \"get\"\n cls.post_method = \"post\"\n cls.url = \"some_url\"\n cls.version = \"some_version\"\n cls.headers = \"some_headers\"\n cls.body = b\"some_body\"\n cls.sender = \"fetchai/some_skill:0.1.0\"\n cls.skill_id = str(cls._skill.skill_context.skill_id)\n\n cls.status_code = 100\n cls.status_text = \"some_status_text\"\n\n cls.content = b\"some_content\"\n cls.list_of_messages = (\n DialogueMessage(\n HttpMessage.Performative.REQUEST,\n {\n \"method\": cls.get_method,\n \"url\": cls.url,\n \"version\": cls.version,\n \"headers\": cls.headers,\n \"body\": cls.body,\n },\n ),\n )", "def __initHandlersUser(self):\n handlers = {}\n handlers['WRITE_FILE'] = self.write_file\n handlers['READU_FILE'] = self.read_file\n handlers['DELET_FILE'] = self.delete_file\n handlers['STATUS_SRV'] = self.status_server\n handlers['RSYNC_FILE'] = self.rsync_file\n handlers['WSYNC_FILE'] = self.wsync_file\n return handlers", "def load(self):\r\n self.create_effect_classes()\r\n\r\n 
self._add_resource_descriptions_to_pools(self.create_external_resources())\r\n self._add_resource_descriptions_to_pools(self.create_resources())\r\n\r\n for meta, resource in resources.textures.load_pool():\r\n self._textures[meta.label] = resource\r\n\r\n for meta, resource in resources.programs.load_pool():\r\n self._programs[meta.label] = resource\r\n\r\n for meta, resource in resources.scenes.load_pool():\r\n self._scenes[meta.label] = resource\r\n\r\n for meta, resource in resources.data.load_pool():\r\n self._data[meta.label] = resource\r\n\r\n self.create_effect_instances()\r\n self.post_load()", "def loadSuits(level):\n loadSuitModelsAndAnims(level, flag = 1)\n loadDialog(level)", "def OnStartup(cls, modName ):\r\n import sys\r\n targetMod = sys.modules[ modName ]\r\n cls.StartupVarsInjectToMod( targetMod )\r\n EasyIpyMagics.RegisterMagics()\r\n \r\n cls.RegisterPostExecute()", "def _add_extensions(self):\n ext_cache_down = 'cache_downloading'\n ext_cache_up = 'cache_uploading'\n cmd_args = self.task_data.get('cmd_args', {})\n if not isinstance(cmd_args, dict):\n cmd_args = {}\n if cmd_args.get('save_raw_pages', False):\n self.required_signals[SIGNAL_SPIDER_OPENED]['wait'] += \\\n EXTENSION_SIGNALS[ext_cache_up]\n if cmd_args.get('load_raw_pages'):\n self.required_signals[SIGNAL_SCRIPT_CLOSED]['wait'] += \\\n EXTENSION_SIGNALS[ext_cache_down]", "def _run_lyrics_gui(self):\n self._log.info(\"Searching for lyrics\")\n\n self.save_lyrics(find=True)\n Action(\"load\", load=True)\n\n self._log.info(\"Done\")", "def _post_load(self):\n pass", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def load_commands():\n return [AddBook, FindBook, FindBooks, EditBook, RemoveBook, ReviewBook]", "def _skills_manager_dispatch():\n global ws\n ws.emit(Message(\"skill_manager\", {}))", "def __load_handler(self):\n with open(self.path) as file:\n for line in file:\n if line.startswith(\"\"\"# TABLE: \"\"\"):\n self.columndefinition = (line.strip('\\n')\n .replace(\"\"\"# TABLE: \"\"\", ''))\n self.tablename = self.name.replace('.', '_')\n self.tablename = self.tablename.replace('-', '_')\n self.md5_tablename = (hashlib.md5(self.tablename)\n .hexdigest()[:30])\n for columnelement in self.columndefinition.split(','):\n column = columnelement.split(':')[0].strip()\n self.columnnames.append(column)\n\n self.is_mime_handler = True", "def __init__(self):\n self.modes = {}\n self.modelist = []\n self.mode = 'main'\n self.defs = {}\n events.bind(Key=self.dispatch)", "def ready(self):\n import exams.signals # pylint: disable=unused-import", "def setup(webhook_url=None):\n logging.basicConfig(level=logging.WARNING)\n if webhook_url:\n bot = Bot(TOKEN)\n update_queue = Queue()\n dp = Dispatcher(bot, update_queue)\n else:\n updater = Updater(TOKEN)\n bot = updater.bot\n dp = updater.dispatcher\n conv_handler1 = ConversationHandler(\n entry_points=[CommandHandler('upcoming',upcoming_menu1,pass_user_data=True,pass_args=True)],\n allow_reentry=True,\n states={\n REC_LOC: [MessageHandler(Filters.location|Filters.text,recieve_location,pass_user_data=True)],\n UPCM_2: [CallbackQueryHandler(upcoming_menu2,pattern=r'\\w*3\\b',pass_user_data=True)],\n GET_COUNTRY:[MessageHandler(Filters.text,get_country,pass_user_data=True)],\n GET_CITY:[MessageHandler(Filters.text,get_city,pass_user_data=True)]\n },\n fallbacks=[CommandHandler('cancel', cancel, 
pass_user_data=True, pass_args=True)]\n )\n\n conv_handler2=ConversationHandler(\n entry_points=[CommandHandler('set_location', set_location, pass_user_data=True,pass_args=True),\n CommandHandler(\"start\", start, pass_args=True, pass_user_data=True)],\n allow_reentry=True,\n states={\n SET_LOC: [MessageHandler(Filters.location | Filters.text, recieve_set_loc, pass_user_data=True)],\n SET_COUNTRY: [MessageHandler(Filters.text, set_country, pass_user_data=True)],\n SET_CITY: [MessageHandler(Filters.text, set_city, pass_user_data=True)]\n },\n fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True, pass_args=True)]\n )\n conv_handler3 = ConversationHandler(\n entry_points=[CommandHandler('subscribe',check_subscriber, pass_user_data=True, pass_args=True)],\n allow_reentry=True,\n states={\n SUBS_2: [CallbackQueryHandler(subscribe, pattern=r'\\w*4\\b', pass_user_data=True)],\n },\n fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True, pass_args=True)]\n )\n conv_handler4 = ConversationHandler(\n entry_points=[CommandHandler('unsubscribe', check_unsubscriber, pass_user_data=True,pass_args=True)],\n allow_reentry=True,\n states={\n UNSUB_1: [CallbackQueryHandler(unsubscribe, pattern=r'\\w*5\\b', pass_user_data=True)],\n },\n fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True, pass_args=True)]\n )\n # ADMIN CONVERSATION HANDLER TO BROADCAST MESSAGES\n conv_handler5 = ConversationHandler(\n entry_points=[CommandHandler('broadcast', broadcast,pass_args=True,pass_user_data=True)],\n allow_reentry=True,\n states={\n BDC: [MessageHandler(Filters.text, broadcast_message)]\n },\n\n fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True, pass_args=True)]\n )\n # CONVERSATION HANDLER FOR REPLACING SQLITE DATABASE\n conv_handler6 = ConversationHandler(\n entry_points=[CommandHandler('senddb', getDb,pass_user_data=True,pass_args=True)],\n allow_reentry=True,\n states={\n DB: [MessageHandler(Filters.document, db)]\n },\n\n fallbacks=[CommandHandler('cancel', cancel, pass_user_data=True, pass_args=True)]\n )\n dp.add_handler(conv_handler1)\n dp.add_handler(conv_handler2)\n dp.add_handler(conv_handler3)\n dp.add_handler(conv_handler4)\n dp.add_handler(conv_handler5)\n dp.add_handler(conv_handler6)\n dp.add_handler(CommandHandler('givememydb', givememydb,pass_args=True,pass_user_data=True))\n dp.add_handler(CommandHandler('help',help,pass_user_data=True,pass_args=True))\n # log all errors\n dp.add_error_handler(error)\n # Add your handlers here\n if webhook_url:\n bot.set_webhook(webhook_url=webhook_url)\n thread = Thread(target=dp.start, name='dispatcher')\n thread.start()\n return update_queue, bot\n else:\n bot.set_webhook() # Delete webhook\n updater.start_polling()\n updater.idle()", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "def _set_handler_to_server(self):\r\n\t\tself._comm_server.set_disconnection_handler(self.player_quit)\r\n\t\tself._comm_server.add_command_handler(\"join\", self.player_join)\r\n\t\tself._comm_server.add_command_handler(\"position\", self.player_position)\r\n\t\tself._comm_server.add_command_handler(\"send-to\", self.player_send_msg)\r\n\t\tself._comm_server.add_command_handler(\"send-team\", self.player_team_broadcast)", "def loadStdCommands(self, player):\n player.addCommand('spawn', self.commands['spawn']())\n player.addCommand('edit', self.commands['edit']())\n player.addCommand('search', 
self.commands['search']())\n player.addCommand('warp', self.commands['warp']())\n player.addCommand('addstat', self.commands['addstat']())\n player.addCommand('delstat', self.commands['delstat']())\n player.addCommand('savezone', self.commands['savezone']())\n player.addCommand('obliterate', self.commands['obliterate']())", "def on_hook(self) -> None:", "def load_questions(self, verbose=True):\n for question in self.question_list:\n question.load_question(self.data)", "def resetHandlers(self):\n def stop(e):\n raise StopIteration\n self._eventHandlers = {QUIT: stop}\n pygame.event.set_allowed(None) # this should block all event types\n self.addHandlers({}) # then add them back in selectively", "def on_load(self, bot):\n self.bot = bot\n self.connection = bot.get_connection()\n self.plugin_manager = bot.get_plugin_manager()\n self.config = bot.get_config_manager()\n self.data_manager = bot.get_data_manager()", "def loadOlcCommands(self, player):\n player.addCommand('newzone', self.commands['newzone']())\n player.addCommand('delzone', self.commands['delzone']())\n player.addCommand('listzone', self.commands['listzone']())\n player.addCommand('newroom', self.commands['newroom']())\n player.addCommand('redit', self.commands['redit']())\n player.addCommand('delroom', self.commands['delroom']())\n player.addCommand('newportal', self.commands['newportal']())\n player.addCommand('delportal', self.commands['delportal']())\n player.addCommand('zedit', self.commands['zedit']())\n player.addCommand('pedit', self.commands['pedit']())\n player.addCommand('newtemplate', self.commands['newtemplate']())", "def __init__(self):\r\n super(LogParser, self).__init__([CmdQueryHandler(),\r\n UpdateQueryHandler(),\r\n StandardQueryHandler(),\r\n TimeLineHandler()])", "def _register(self, comm, handler):", "def _handle_pending(self):\r\n if not self.pending:\r\n self._post_message('')\r\n return\r\n info, desired = self.pending\r\n if desired and self.plugins[desired].busy:\r\n return\r\n self.busy = True\r\n\r\n if desired:\r\n plugins = [self.plugins[desired]]\r\n elif info.name == 'definition' and not info.editor.is_python():\r\n plugins = [p for p in self.plugins.values() if not p.busy]\r\n else:\r\n # use all but the fallback\r\n plugins = [p for p in list(self.plugins.values())[:-1] if not p.busy]\r\n\r\n self.request = RequestHandler(info, plugins)\r\n self.request.introspection_complete.connect(\r\n self._introspection_complete)\r\n self.pending = None", "def __initSpellingActions(self):\n self.spellingActGrp = createActionGroup(self)\n \n self.spellCheckAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Check spelling'),\n UI.PixmapCache.getIcon(\"spellchecking.png\"),\n QCoreApplication.translate(\n 'ViewManager', 'Check &spelling...'),\n QKeySequence(QCoreApplication.translate(\n 'ViewManager', \"Shift+F7\", \"Spelling|Spell Check\")),\n 0,\n self.spellingActGrp, 'vm_spelling_spellcheck')\n self.spellCheckAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Perform spell check of current editor'))\n self.spellCheckAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Check spelling</b>\"\"\"\n \"\"\"<p>Perform a spell check of the current editor.</p>\"\"\"\n ))\n self.spellCheckAct.triggered.connect(self.__spellCheck)\n self.spellingActions.append(self.spellCheckAct)\n \n self.autoSpellCheckAct = E5Action(\n QCoreApplication.translate(\n 'ViewManager', 'Automatic spell checking'),\n UI.PixmapCache.getIcon(\"autospellchecking.png\"),\n QCoreApplication.translate(\n 
'ViewManager', '&Automatic spell checking'),\n 0, 0,\n self.spellingActGrp, 'vm_spelling_autospellcheck', True)\n self.autoSpellCheckAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', '(De-)Activate automatic spell checking'))\n self.autoSpellCheckAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Automatic spell checking</b>\"\"\"\n \"\"\"<p>Activate or deactivate the automatic spell checking\"\"\"\n \"\"\" function of all editors.</p>\"\"\"\n ))\n self.autoSpellCheckAct.setChecked(\n Preferences.getEditor(\"AutoSpellCheckingEnabled\"))\n self.autoSpellCheckAct.triggered.connect(\n self.__setAutoSpellChecking)\n self.spellingActions.append(self.autoSpellCheckAct)\n \n self.__enableSpellingActions()", "def __init__(self):\n self.websock_handlers = {}\n self.ajax_handlers = {'__dashboard__': self.get_dashboard_ui}\n self.dashboard_handlers = {}", "def _load_quasar_classes(self):\n\n logging.debug('Loading quasar classes: begin')\n self._quasar_classes = {}\n for klass in self.design_inspector.get_names_of_all_classes():\n objectified_class = self.design_inspector.objectify_class(klass)\n quasar_class = QuasarClass(objectified_class, self)\n self._quasar_classes[klass] = quasar_class\n logging.debug(f'Loading quasar classes: end, loaded {len(self._quasar_classes)}')", "async def setup(self):\n\t\tlogging.config.dictConfig(self.log_settings['log'])\n\t\tself.logger = logging.getLogger('Responder3')\n\t\tself.create_dir_strucutre()\n\n\t\tif 'handlers' in self.log_settings:\n\t\t\tasync for handlerclass, handler in self.get_handlers():\n\t\t\t\tawait self.start_extension(handlerclass, self.log_settings[self.log_settings['handlers'][handler]])", "def setup_commands(bot):\n # Reset the bot's command setup\n bot.reset_commands()\n # Load enabled mods\n for mod in bot.enabled_mods:\n try:\n full = 'mod_%s' % mod\n m = getattr(__import__('mods.%s' % full), full)\n except Exception:\n bot.log(ERROR, 'Importing the %s mod failed!' 
% mod)\n sys.excepthook(*sys.exc_info())\n continue\n\n try:\n bot.installed_mods[mod] = m\n # Check for a 404 handler, and replace the current one if there is\n p404 = getattr(m, 'handle_404', None)\n if p404:\n bot.cb_404 = p404\n\n # Check for a setup function, and run it if there is\n setup = getattr(m, 'setup', None)\n if setup:\n setup(bot)\n\n # Required command bank\n for cmd in m.command_bank:\n # Get the actual function\n func = getattr(m, cmd)\n # Get the args for the command\n data = m.command_bank[cmd]\n # If data[0] is true, mod_help will recognize this command\n if data[0]:\n bot.help_db[data[1]] = parse_help(func)\n # Get the main name and aliases inserted\n for alias in data[1:]:\n bot.command_db[alias] = func\n\n # Helper function for optional nameless multiples\n def add_optional(olist, name):\n olist.extend(getattr(m, f) for f in getattr(m, name, ()))\n\n # Optional filters are loaded and added to the list\n add_optional(bot.filters, 'filters')\n\n # Ditto for time-cycle callbacks\n add_optional(bot.periodic_cbs, 'periodic')\n\n # Handlers are the same, but structured as a dict with\n # \"type\": \"single function-name\" items\n handlers = getattr(m, 'handlers', None)\n if handlers:\n for cbtype in handlers:\n bot.handlers[cbtype].append(getattr(m, handlers[cbtype]))\n\n # Register any requirements\n # NOTE: By putting this at the end, we avoid the possibility of\n # getting fake requires.\n reqs = getattr(m, 'requires', None)\n if reqs:\n bot.required_mods.update(reqs)\n except Exception:\n bot.log(ERROR, 'Unable to install the %s mod!' % mod)\n del bot.installed_mods[mod]\n sys.excepthook(*sys.exc_info())\n\n missing = bot.required_mods - set(bot.installed_mods)\n if missing:\n raise MissingRequirementsError(missing)\n\n # And now for the post-install triggers.\n for mod, m in bot.installed_mods.items():\n post = getattr(m, 'post_prepare', None)\n if post:\n try:\n post(bot)\n except Exception:\n bot.log(ERROR, 'Unable to post-prepare the %s mod!' % mod)\n sys.excepthook(*sys.exc_info())", "def _query_commands(self):\n # TODO: make this work\n self.player.respond(\"Hi there! Ask me to play artists or songs. 
\"\n \"I can also find songs that are similar to other \"\n \"artists.\")", "def get_command_handlers(self):\n\t\treturn self.command_handlers", "def run_game_logic(self):\n pass", "def _hook(self):", "def __init__(self, c: Cmdr) -> None:\n # pylint: disable=super-init-not-called\n self.c = c\n self.handler: SpellTabHandler = None\n self.reloadSettings()", "def __load(self, *args, **kwargs):\n self.__should_load = True\n return Menu.CONTINUE", "def _init_commands(self):\n\t\tself.commands = {}\n\t\tself.log.info(\"Initializing commands...\")\n\t\t# Get all the commands and iterate over them\n\t\tfor command in self.conf_commands:\n\t\t\t\n\t\t\t# Verify the necessary config elements exist at all\n\t\t\tdisabled = command.get('disabled', False) # Disabled is optional, defaults to False\n\t\t\tif(disabled == True):\n\t\t\t\tcontinue;\n\t\t\tcommand_name = command.get('name', \"unknown\").lower()\n\t\t\tdescription = command.get('description', \"\")\n\t\t\tpermission_str = command.get('permission', None)\n\t\t\taction = command.get('action', None)\n\t\t\tmin_votes = command.get('min_votes', None)\n\t\t\targs = command.get('args', None)\n\t\t\taliases = command.get('aliases', None)\n\t\t\tif(command_name is None \n\t\t\t\tor permission_str is None \n\t\t\t\tor action is None \n\t\t\t\tor min_votes is None \n\t\t\t\tor args is None):\n\t\t\t\tself.log.warn(\"Command '{}': Error, missing 'permission', 'action', 'min_votes', or 'args' elements for command \".format(command_name))\n\t\t\t\tcontinue\n\n\t\t\t# Verify the votes and permission string are valid\n\t\t\tif(min_votes < 0):\n\t\t\t\tself.log.warn(\"Command '{}': Error, min_votes cannot be less than zero for command {}\".format(command_name, min_votes))\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tself.log.debug(\"Command '{}': minimum votes is {}\".format(command_name, min_votes))\n\n\t\t\ttry:\n\t\t\t\tpermission = Permission[permission_str]\n\t\t\t\tself.log.debug(\"Command '{}': permission is {}\".format(command_name, permission))\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.warn(\"Command '{}': Error, permission string '{}' is invalid, must be one of: {}\".format(command_name, permission_str, Permission.__members__))\n\t\t\t\tcontinue\n\n\t\t\t# Try to get the corresponding action class\n\t\t\ttry:\n\t\t\t\tmodule = import_module(\"obs.actions.\"+action)\n\t\t\t\tclass_ = getattr(module, action)\n\t\t\t\tself.log.debug(\"Command {}: action is {}\".format(command_name, class_))\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.warn(\"Command '{}': Error, no such action {} is defined. 
Full error: {}\".format(command_name, action, e))\n\t\t\t\tcontinue\n\n\t\t\t# Try to instantiate the action class\n\t\t\ttry:\n\t\t\t\tself.log.debug(\"Command {}: args are: {}\".format(command_name, args))\n\t\t\t\tcommand_obj = class_(self, command_name, aliases, description, permission, min_votes, args)\n\t\t\texcept ValueError as e:\n\t\t\t\tself.log.warn(e)\n\t\t\t\tcontinue\n\n\t\t\t# Add command_obj to internal reference\n\t\t\tself.commands[command_name] = command_obj\n\n\t\t\t# If there are aliases, add them too\n\t\t\t\n\t\t\tif(not aliases is None and isinstance(aliases, (list,) )):\n\t\t\t\tself.log.debug(\"Command '{}': Found aliases {}\".format(command_name, aliases))\n\t\t\t\tfor alias in aliases:\n\t\t\t\t\tself.commands[alias] = command_obj\n\t\t\telse:\n\t\t\t\tself.log.debug(\"Command '{}': No aliases\".format(command_name, aliases))\n\n\t\t# Finally after all commands have been initialized then add the help command\n\t\t#self.commands['help'] = Help(self)\n\n\t\t# Done initializing\n\t\tself.log.info(\"...Commands initialized: {}\".format(\n\t\t\t\tlist( self.commands.keys()) \n\t\t\t)\n\t\t)", "async def on_ready():\n print('We have logged in as {0.user}'.format(client))\n global command_dictionary\n command_dictionary = get_setting_commands()", "async def on_ready():\n # Sets bot's status\n await bot.change_presence(status=discord.Status.idle, activity=discord.Game('with your girlfriend ;)'))\n # Initial extensions to load.\n await ext_manager.load()\n\n print('Bot is ready.')", "def start_collision_handlers(self):\n self.missile_and_terrain.begin = self.missile_terrain_collision_begin\n self.missile_and_spacecraft_handler.begin = self.missile_spacecraft_collision_begin\n self.spacecraft_and_terrain_handler.begin = self.spacecraft_terrain_collision_begin\n\n self.missile_and_spacecraft_handler.pre_solve = self.collision_pre\n self.missile_and_spacecraft_handler.post_solve = self.collision_post_solve\n self.missile_and_spacecraft_handler.separate = self.collision_separate\n self.missile_and_terrain.pre_solve = self.collision_pre\n self.missile_and_terrain.post_solve = self.collision_post_solve\n self.missile_and_terrain.separate = self.collision_separate\n self.spacecraft_and_terrain_handler.pre_solve = self.collision_pre\n self.spacecraft_and_terrain_handler.post_solve = self.collision_post_solve\n self.spacecraft_and_terrain_handler.separate = self.collision_separate", "def get_pygame_events(self):\n for event in pygame.event.get():\n if event.type in self.registered_pygame_handlers:\n for handler in self.registered_pygame_handlers[event.type]:\n\n if (event.type == pygame.KEYDOWN or\n event.type == pygame.KEYUP):\n handler(event.key, event.mod)\n else:\n handler()", "def on_enter(self):\n\n super(BaseScene, self).on_enter()\n\n self.load_map()\n self.load_players()\n self.load_enemies()\n self.load_status_bar()\n\n self.enemies_layer.next_wave()", "def load_settings(self):\r\n log.info('Loading settings...')\r\n\r\n # Load default language.\r\n actions = self.menuLanguage.actions()\r\n for action in actions:\r\n if action.data().toString() == self.config.default_language:\r\n action.setChecked(True)\r\n self.translate(action)\r\n break\r\n\r\n # Load Talks as a SQL Data Model.\r\n self.load_event_list()", "def import_handlers(self):\n if not self._import_handlers:\n self._initialize_handlers()\n\n return self._import_handlers", "def _read_from_loader(self, loader):\n self._domain = loader.get_domain() or ''\n self._version = loader.get_version()\n\n # Load UIM 
manager\n # Do this first since the custom menubar and toolbars adapters\n # depends on all ui definitions being loaded\n self.uim.load(loader)\n\n # Load models before widgets\n models = [w for w in loader.toplevels if isinstance(w, gtk.ListStore)]\n for model in models:\n self.model_manager.load_model(model)\n\n # Load the widgets\n for widget in loader.toplevels:\n if isinstance(widget, gtk.Widget):\n self._load_widget(widget)\n\n # Load sizegroups, must be done after loading all the widgets,\n # since the sizegroups has references to the widgets\n for sizegroup in loader.sizegroups:\n name = sizegroup.get_data('gazpacho::object-id')\n widgets = sizegroup.get_data('gazpacho::sizegroup-widgets') or []\n gadgets = [Gadget.from_widget(widget)\n for widget in widgets]\n self.add_sizegroup(GSizeGroup(name, sizegroup, gadgets))\n\n # Signals\n for signal in loader.get_signals():\n gobj, signal_name, signal_handler, signal_after = signal[:4]\n gadget = Gadget.from_widget(gobj)\n if gadget is None:\n continue\n gadget.add_signal_handler(SignalInfo(name=signal_name,\n handler=signal_handler,\n after=signal_after))\n\n self._unsupported_widgets = loader.get_unsupported_widgets()\n\n self.changed = False", "async def loadpokemon(self, ctx):\n await self.bot.di.new_items(ctx.guild, (ServerItem(**item) for item in self.bot.pokemonitems.values()))\n await ctx.send(await _(ctx, \"Successfully added all Pokemon items!\"))", "def ready(self):\n import main.signals # noqa", "def _starting_up():\n global ws, skill_reload_thread, event_scheduler\n\n ws.on('intent_failure', FallbackSkill.make_intent_failure_handler(ws))\n\n # Create skill_manager listener and invoke the first time\n ws.on('skill_manager', skills_manager)\n ws.on('mycroft.internet.connected', install_default_skills)\n ws.emit(Message('skill_manager', {}))\n\n # Create the Intent manager, which converts utterances to intents\n # This is the heart of the voice invoked skill system\n\n PadatiousService(ws)\n IntentService(ws)\n event_scheduler = EventScheduler(ws)\n # Create a thread that monitors the loaded skills, looking for updates\n skill_reload_thread = WatchSkills()\n skill_reload_thread.daemon = True\n skill_reload_thread.start()\n\n # Wait until skills have been loaded once before starting to check\n # network connection\n skill_reload_thread.wait_loaded_priority()\n check_connection()", "def postLoad(self):\n pass" ]
[ "0.6526081", "0.6394275", "0.60561556", "0.59975696", "0.596935", "0.5927591", "0.59075165", "0.5878692", "0.569096", "0.5556501", "0.5527142", "0.5512644", "0.55046976", "0.54805523", "0.54044414", "0.5394727", "0.5393246", "0.5391057", "0.5354453", "0.5347178", "0.53144395", "0.5286961", "0.5270832", "0.52531284", "0.5250341", "0.5240783", "0.52251697", "0.52249855", "0.5177247", "0.51489383", "0.5141873", "0.51351637", "0.51078063", "0.5085512", "0.5085512", "0.50538313", "0.504434", "0.50292706", "0.50289786", "0.50280654", "0.5022588", "0.50195086", "0.5019506", "0.50181895", "0.5016366", "0.5013699", "0.50093", "0.50070405", "0.50041044", "0.49975422", "0.49869633", "0.49788392", "0.49622646", "0.49537742", "0.49514312", "0.49378362", "0.49314734", "0.4920258", "0.4919678", "0.4919371", "0.49181545", "0.49177563", "0.4914914", "0.49144697", "0.49133134", "0.49065366", "0.48949796", "0.4893158", "0.48878944", "0.4887469", "0.48863396", "0.48841852", "0.48828146", "0.4869569", "0.48633692", "0.4854513", "0.48508817", "0.4844638", "0.48428863", "0.48416007", "0.48409718", "0.48394307", "0.48284444", "0.48230097", "0.48180327", "0.4817297", "0.48089492", "0.48060033", "0.48039472", "0.48028132", "0.4801933", "0.48018548", "0.48010564", "0.48007485", "0.48006654", "0.47902268", "0.47893026", "0.4784012", "0.47812134", "0.47807208" ]
0.71654475
0
Add a quest handler to the aiohttp app
def add_quest(self, method: str, route: str, handler): self.aiohttp.router.add_route(method, route, handler)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _response_handler(self):", "async def startup_handler(app):\n\n spotify_client_id = os.environ.get(SPOTIFY_CLIENT_ID)\n spotify_client_secret = os.environ.get(SPOTIFY_CLIENT_SECRET)\n\n # Save dependencies in the HTTP app.\n http.register_dependency(app, SPOTIFY_CLIENT_ID, spotify_client_id)\n http.register_dependency(app, SPOTIFY_CLIENT_SECRET, spotify_client_secret)\n\n async def cleanup(app):\n \"\"\"Perform required cleanup on shutdown\"\"\"\n # await client_session.close()\n\n app.on_shutdown.append(cleanup)", "def launch_request_handler(handler_input):\n speech_text = \"Hello! Are you looking to connect and play with others?\"\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello! Are you looking to connect and play with others?\", speech_text)).set_should_end_session(False)\n return handler_input.response_builder.response", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech_text = f\"Yo yo yo what's popping. Come checkout what is up with your Monzo\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello World\", speech_text)).set_should_end_session(\n False)\n return handler_input.response_builder.response", "async def init_server() -> aiohttp.web.Application:\n app = aiohttp.web.Application(\n middlewares=[\n swift_browser_ui.common.common_middleware.add_cors, # type: ignore\n swift_browser_ui.common.common_middleware.check_db_conn, # type: ignore\n swift_browser_ui.common.common_middleware.handle_validate_authentication, # type: ignore\n swift_browser_ui.common.common_middleware.catch_uniqueness_error, # type: ignore\n swift_browser_ui.common.common_middleware.error_handler, # type: ignore\n ]\n )\n\n async def on_prepare(\n _: aiohttp.web.Request, response: aiohttp.web.StreamResponse\n ) -> None:\n \"\"\"Modify Server headers.\"\"\"\n response.headers[\"Server\"] = \"Swift Browser Request\"\n\n # add custom response headers\n app.on_response_prepare.append(on_prepare)\n\n app.add_routes(\n [\n aiohttp.web.get(\"/health\", handle_health_check),\n ]\n )\n\n app.add_routes(\n [\n aiohttp.web.options(\n \"/request/user/{user}/{container}\",\n swift_browser_ui.common.common_handlers.handle_delete_preflight,\n ),\n aiohttp.web.post(\n \"/request/user/{user}/{container}\", handle_share_request_post\n ),\n aiohttp.web.delete(\n \"/request/user/{user}/{container}\", handle_user_share_request_delete\n ),\n aiohttp.web.get(\"/request/user/{user}\", handle_user_made_request_listing),\n aiohttp.web.get(\"/request/owner/{user}\", handle_user_owned_request_listing),\n aiohttp.web.get(\n \"/request/container/{container}\", handle_container_request_listing\n ),\n ]\n )\n\n app.add_routes(\n [\n aiohttp.web.options(\n \"/token/{project}/{id}\",\n swift_browser_ui.common.common_handlers.handle_delete_preflight,\n ),\n aiohttp.web.post(\"/token/{project}/{id}\", handle_user_add_token),\n aiohttp.web.delete(\"/token/{project}/{id}\", handle_user_delete_token),\n aiohttp.web.get(\"/token/{project}\", handle_user_list_tokens),\n ]\n )\n\n app.on_startup.append(resume_on_start)\n app.on_startup.append(swift_browser_ui.common.common_util.read_in_keys)\n app.on_shutdown.append(graceful_shutdown)\n\n return app", "def cli(app, aiohttp_client):\n return asyncio.get_event_loop().run_until_complete(aiohttp_client(app.app))", "def cli(loop, aiohttp_client, known_domain_data):\n app = web.Application()\n\n async def get_handler(request):\n return web.json_response(known_domain_data)\n\n async def 
bad_get_handler(request):\n return web.json_response(\n {'errors': [{'code': '50004', 'detail': 'URL is not found.'}]},\n status=500\n )\n\n async def post_handler(request):\n json_data = await request.json()\n response_dict = known_domain_data\n response_dict.update(json_data)\n if request.query:\n response_dict['query_args'] = dict(request.query)\n return web.json_response(response_dict)\n\n async def put_handler(request):\n json_data = await request.json()\n response_dict = known_domain_data\n response_dict.update(json_data)\n if request.query:\n response_dict['query_args'] = dict(request.query)\n return web.json_response(response_dict)\n\n app.router.add_get(path='/cli-test', handler=get_handler)\n app.router.add_post(path='/cli-test', handler=post_handler)\n app.router.add_put(path='/cli-test', handler=put_handler)\n app.router.add_get(path='/cli-test-bad', handler=bad_get_handler)\n\n return loop.run_until_complete(aiohttp_client(app))", "async def serve(app, flow: http.HTTPFlow):\n\n scope = make_scope(flow)\n done = asyncio.Event()\n received_body = False\n sent_response = False\n\n async def receive():\n nonlocal received_body\n if not received_body:\n received_body = True\n return {\n \"type\": \"http.request\",\n \"body\": flow.request.raw_content,\n }\n else: # pragma: no cover\n # We really don't expect this to be called a second time, but what to do?\n # We just wait until the request is done before we continue here with sending a disconnect.\n await done.wait()\n return {\"type\": \"http.disconnect\"}\n\n async def send(event):\n if event[\"type\"] == \"http.response.start\":\n flow.response = http.Response.make(\n event[\"status\"], b\"\", event.get(\"headers\", [])\n )\n flow.response.decode()\n elif event[\"type\"] == \"http.response.body\":\n assert flow.response\n flow.response.content += event.get(\"body\", b\"\")\n if not event.get(\"more_body\", False):\n nonlocal sent_response\n sent_response = True\n else:\n raise AssertionError(f\"Unexpected event: {event['type']}\")\n\n try:\n await app(scope, receive, send)\n if not sent_response:\n raise RuntimeError(f\"no response sent.\")\n except Exception:\n logger.error(f\"Error in asgi app:\\n{traceback.format_exc(limit=-5)}\")\n flow.response = http.Response.make(500, b\"ASGI Error.\")\n finally:\n done.set()", "def on_startup():\n\n async def startup_handler(app):\n \"\"\"Run all initialization tasks.\n These are tasks that should be run after the event loop has been started but before the HTTP\n server has been started.\n \"\"\"\n\n spotify_client_id = os.environ.get(SPOTIFY_CLIENT_ID)\n spotify_client_secret = os.environ.get(SPOTIFY_CLIENT_SECRET)\n\n # Save dependencies in the HTTP app.\n http.register_dependency(app, SPOTIFY_CLIENT_ID, spotify_client_id)\n http.register_dependency(app, SPOTIFY_CLIENT_SECRET, spotify_client_secret)\n\n async def cleanup(app):\n \"\"\"Perform required cleanup on shutdown\"\"\"\n # await client_session.close()\n\n app.on_shutdown.append(cleanup)\n\n return startup_handler", "def _run_aiohttp(port):\n loop = aio.get_event_loop()\n aio_app = init_app(loop)\n handler = aio_app.make_handler()\n srv = loop.run_until_complete(\n loop.create_server(\n handler,\n '0.0.0.0',\n port,\n ))\n print(\"serving on\", srv.sockets[0].getsockname())\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n finally:\n srv.close()\n loop.run_until_complete(srv.wait_closed())\n loop.run_until_complete(aio_app.shutdown())\n loop.run_until_complete(handler.shutdown(_HANDLER_SHUTDOWN_SEC))\n 
loop.run_until_complete(aio_app.cleanup())\n loop.close()", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = \"Welcome to the Merriam-Webster Dictionary. What word can I look up for you?\"\n reprompt = \"You can say: definition of word, example of word, or synonym of word.\"\n\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "def config(self):\n\n # Set up on_startup listener for connecting to the server\n self.aiohttp.on_startup.append(self.ws.connect)\n\n # Await websocket and client session termination\n async def shutdown(app):\n await self.ws.close()\n await self.client.close()\n\n # Set up on_shutdown listeners for graceful shutdown\n self.aiohttp.on_shutdown.append(shutdown)\n\n # Add a default route\n self.aiohttp.router.add_route('*', '/', lambda request: web.json_response({ \"msg\": \"I'm alive\" }))\n\n # Load user defined quests\n self.load_quests()", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In LaunchRequestHandler\")\n lang = handler_input.request_envelope.request.locale\n try:\n speech = welcome_speech[lang]\n except:\n speech = \"Language \" + lang + \" is not supported.\"\n\n handler_input.response_builder.speak(\n speech).ask(help_text)\n return handler_input.response_builder.response", "def apiai_hook():\n\n route = {\n 'artist_bio': artist_bio,\n 'artist_top_tracks': artist_top_tracks,\n 'artist_similar': artist_similar,\n 'track_similar': track_similar,\n }\n\n req = request.get_json(silent=True, force=True)\n response = {}\n try:\n response = route[req.get('result').get('action')](req)\n except (KeyError, AttributeError) as e:\n logger.error('Invalid action specified, error=\"{0}\".'.format(e))\n return jsonify(response)\n\n return response", "def on_intent(request, session):\n\n intent = request['intent']\n\n print(\"on_intent:\", intent)\n\n if intent[\"name\"] == \"AntwortIntent\":\n return handle_answer_request(intent, session)\n elif intent[\"name\"] == \"DontKnowIntent\":\n return handle_answer_request(intent, session)\n elif intent['name'] == \"AMAZON.RepeatIntent\":\n return handle_repeat_request(intent, session)\n elif intent['name'] == \"AMAZON.StopIntent\" or intent['name'] == \"AMAZON.CancelIntent\":\n return handle_finish_session_request(intent, session)\n elif intent['name'] == \"AMAZON.HelpIntent\":\n return get_help(intent, session)\n elif intent['name'] == \"StartQuizIntent\" or intent['name'] == \"AMAZON.StartoverIntent\":\n if session[\"new\"] == False:\n return get_welcome_message(restart=True)\n #if no intent is identified:\n return get_help(intent, session)", "async def handle_async(req):\n return await logic_async(req)", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = \"Welcome to the Alexa Skills Kit color session sample.\"\n\n handler_input.response_builder.speak(\n speech + \" \" + help_text).ask(help_text)\n return handler_input.response_builder.response", "async def test_async_handler(dm):\n assert not dm.called_async_handler\n request = create_request(\"domain\", \"async\")\n response = create_responder(request)\n result = await dm.apply_handler(request, response)\n assert dm.called_async_handler\n assert result.dialogue_state == \"async_handler\"\n assert len(result.directives) == 1\n assert result.directives[0][\"name\"] == \"reply\"\n assert result.directives[0][\"payload\"] == {\"text\": \"this is the async handler\"}", "def 
launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech_text = \"Welcome to the Transit Time skill, ask when the next bus is coming!\"\n\n return handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Transit Time\", speech_text)).set_should_end_session(\n False).response", "def add_handler(self, handler):\n pass", "def on_intent(event):\n\n intent = event[\"request\"][\"intent\"][\"name\"]\n\n if intent in (\"AMAZON.CancelIntent\", \"AMAZON.StopIntent\", \"AMAZON.NoIntent\"):\n return handle_session_end_request()\n\n if intent == \"AMAZON.YesIntent\":\n if \"attributes\" in event[\"session\"] and \"previousIntent\" in \\\n event[\"session\"][\"attributes\"]:\n\n if event[\"session\"][\"attributes\"][\"previousIntent\"] == \"AMAZON.HelpIntent\":\n return main_handler(event)\n\n speech_output = event[\"session\"][\"attributes\"][\"nextStations\"]\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n speech_output = \"Sorry, something went wrong.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n if intent == \"isBikesAvailable\":\n return main_handler(event)\n\n if intent == \"AMAZON.HelpIntent\":\n return handle_help_intent()\n\n speech_output = \"Sorry, I don\\'t know that.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)", "async def main():\n bot = triogram.make_bot()\n async with bot, trio.open_nursery() as nursery:\n nursery.start_soon(bot)\n nursery.start_soon(echo, bot)\n nursery.start_soon(echo_once, bot)", "def help_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n handler_input.response_builder.speak(help_text).ask(help_text)\n return handler_input.response_builder.response", "def help_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n handler_input.response_builder.speak(help_text).ask(help_text)\n return handler_input.response_builder.response", "def help_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n handler_input.response_builder.speak(help_text).ask(help_text)\n return handler_input.response_builder.response", "def unhandled_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n intent_name = get_intent_name(handler_input)\n if intent_name == 'ChallengeBossIntent':\n speech_text = 'You need to be in the boss room to challenge the boss. '\n elif intent_name == 'EnterMazeIntent':\n speech_text = 'You already have a maze in progress. Would you like to resume the maze or discard the maze? '\n elif intent_name == 'ResumeMazeIntent' or intent_name == 'DiscardMazeIntent':\n speech_text = 'You are already in a maze or you don\\'t have a maze in progress. Say enter the maze or discard the maze. '\n elif intent_name == 'LocationIntent':\n speech_text = 'You need to be in a maze to locate yourself. Say enter the maze or resume the maze. '\n elif intent_name == 'MoveIntent':\n speech_text = 'You need to be in a maze to take a move. Say enter the maze or resume the maze. '\n else:\n speech_text = 'I am not sure what you are saying. 
'\n\n handler_input.response_builder.speak(\n speech_text).set_should_end_session(False)\n return handler_input.response_builder.response", "def launch_request_handler(handler_input: HandlerInput) -> Response:\n day = events.get_date()\n text = events.for_day(day)\n log.info(f\"launch: events for {day} = {text}\")\n return (\n handler_input.response_builder.speak(text)\n .set_card(SimpleCard(f\"Hillbrook events for {day.strftime('%A')}:\\n{text}\"))\n .set_should_end_session(True)\n .response\n )", "async def app(scope, receive, send):\n html = b\"\"\"\n <!doctype html>\n <html>\n <head>\n <title>Hello ASGI!</title>\n </head>\n <body>\n <main>\n <h1>Hello ASGI!</h1>\n </main>\n </body>\n </html>\n \"\"\"\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": 200,\n \"headers\": [[b\"content-type\", b\"text/html\"], [b\"content-length\", b\"269\"],],\n }\n )\n await send(\n {\"type\": \"http.response.body\", \"body\": html, \"more_body\": False,}\n )", "async def handle_request(self, request: aioweb.request.Request):", "def event_handler(self, response):\n pass", "def main_handler(event):\n\n address = \"\"\n try:\n address = get_address(event)\n except ValueError:\n # Value error is raised if no permissions to address\n return ask_permissions()\n except Exception:\n # Some other error when getting location\n speech_output = \"Sorry, error occurred when retrieving device address.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n nearest_stations = get_nearest_stations(3, address)\n station = nearest_stations[0]\n\n speech_output = f\"On station {station['name']} is {station['bikesAvailable']} \" \\\n \"bikes available. Do you want to hear more nearby stations?\"\n\n session_attributes = {\n \"previousIntent\": \"mainHandler\",\n \"nextStations\": build_next_stations(nearest_stations[1:3])\n }\n\n response = build_speechlet_response(CARD_TITLE, speech_output, False)\n return build_response(response, session_attributes)", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n # intents_object = get_custom_intents()\n print (\"************\")\n print (intent_request)\n # fall_back = True\n # final_function = ''\n # for temp_intent in intents_object:\n # if temp_intent == intent_name:\n # fall_back = False\n # final_function = temp_intent[1]\n # break\n # if(fall_back):\n # return custom_handlers.get_fallback_msg()\n # else:\n # return final_function(intent, session)\n \n # Dispatch to your skill's intent handlers\n if intent_name == \"welcome_intent\":\n return custom_handlers.get_welcome_msg(intent, session)\n elif intent_name == \"search_intent\":\n return custom_handlers.get_search_msg(intent, session)\n elif intent_name == \"architecture\":\n return custom_handlers.get_architecture_msg(intent, session)\n elif intent_name == \"saybye\":\n return custom_handlers.get_saybye_response(intent, session)\n elif intent_name == \"myname\":\n return custom_handlers.get_myname_response(intent, session)\n elif intent_name == \"ask\":\n return custom_handlers.get_ask_response(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return custom_handlers.get_welcome_response(intent, session)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return custom_handlers.handle_session_end_request(intent, session)\n else:\n return custom_handlers.get_fallback_msg(intent, session)", "async def index(request):\n # Note: we 
return a dict not a response because of the @template decorator\n return {\n 'title': request.app['name'],\n 'intro': \"Success! you've setup a basic aiohttp app.\",\n }", "def make_new_handler(self, *args, **kwargs):", "def __init__( # type:ignore\n self,\n port: int,\n path: str,\n app: \"AsyncApp\", # type:ignore\n ):\n self.port = port\n self.path = path\n self.bolt_app: \"AsyncApp\" = app\n self.web_app = web.Application()\n self._bolt_oauth_flow = self.bolt_app.oauth_flow\n if self._bolt_oauth_flow:\n self.web_app.add_routes(\n [\n web.get(\n self._bolt_oauth_flow.install_path, self.handle_get_requests\n ),\n web.get(\n self._bolt_oauth_flow.redirect_uri_path,\n self.handle_get_requests,\n ),\n web.post(self.path, self.handle_post_requests),\n ]\n )\n else:\n self.web_app.add_routes([web.post(self.path, self.handle_post_requests)])", "async def async_start_hermod():\n print('START HERMOD SERVICES')\n module_dir = os.getcwd()\n sys.path.append(module_dir)\n\n if ARGS.webserver:\n webservice_config = {\n 'certificates_folder': os.getenv('SSL_CERTIFICATES_FOLDER', '/app/certs'),\n 'domain_name': os.getenv('SSL_DOMAIN_NAME', 'localhost'),\n 'email': os.getenv('SSL_EMAIL', 'none@syntithenai.com'),\n }\n # dev mode rebuild web - (NEED docker rebuild with npm global watchify)\n # watchify index.js -v -o static/bundle.js\n CONFIG['services']['WebService'] = webservice_config\n\n if ARGS.actionserver > 0:\n CONFIG['services']['RasaActionsService'] = {}\n\n if ARGS.hermod:\n # admin mqtt connection\n CONFIG['mqtt_hostname'] = os.getenv('MQTT_HOSTNAME') or 'localhost'\n CONFIG['mqtt_hostname'] = os.getenv('MQTT_HOSTNAME') or 'localhost'\n CONFIG['mqtt_port'] = int(os.getenv('MQTT_PORT') or '1883')\n CONFIG['mqtt_user'] = os.getenv('MQTT_USER') or 'hermod_admin'\n CONFIG['mqtt_password'] = os.getenv('MQTT_PASSWORD') or 'talk2mebaby'\n\n # SET SOUND DEVICES\n CONFIG['services']['AudioService'] = {\n \"site\": CONFIG.get('mqtt_user'),\n \"inputdevice\": \"pulse\",\n \"outputdevice\": \"pulse\"}\n if os.getenv(\n 'SPEAKER_DEVICE') is not None and 'AudioService' in CONFIG['services']:\n CONFIG['services']['AudioService']['outputdevice'] = os.getenv(\n 'SPEAKER_DEVICE')\n if os.getenv(\n 'MICROPHONE_DEVICE') is not None and 'AudioService' in CONFIG['services']:\n CONFIG['services']['AudioService']['inputdevice'] = os.getenv(\n 'MICROPHONE_DEVICE')\n\n CONFIG['services']['DialogManagerService'] = {}\n CONFIG['services']['DataLoggerService'] = {}\n\n # HOTWORD\n # #,bumblebee,porcupine\"\n CONFIG['services']['PicovoiceHotwordService'] = {\n \"hotwords\": os.getenv(\n 'PICOVOICE_HOTWORDS',\n \"picovoice\"),\n \"sensitivity\": 0.9}\n\n # ASR\n # Deepspeech\n using_asr = None\n if os.getenv('DEEPSPEECH_MODELS') is not None and os.path.exists(\n os.getenv('DEEPSPEECH_MODELS')):\n if 'DeepspeechAsrService' not in CONFIG['services']:\n CONFIG['services']['DeepspeechAsrService'] = {}\n CONFIG['services']['DeepspeechAsrService']['model_path'] = os.getenv(\n 'DEEPSPEECH_MODELS')\n using_asr = 'Deepspeech'\n\n # disable deepspeech and enable IBM ASR\n if os.getenv('IBM_SPEECH_TO_TEXT_APIKEY', None) is not None and len(\n os.getenv('IBM_SPEECH_TO_TEXT_APIKEY', '')) > 0:\n CONFIG['services'].pop('DeepspeechAsrService', None)\n # 'language': os.environ.get('GOOGLE_APPLICATION_LANGUAGE','en-AU')}\n CONFIG['services']['IbmAsrService'] = {'vad_sensitivity': 1}\n using_asr = 'IBM'\n\n # disable deepspeech,ibm and enable google ASR\n if os.getenv('GOOGLE_ENABLE_ASR') == \"true\" and \\\n 
os.getenv('GOOGLE_APPLICATION_CREDENTIALS') \\\n and os.path.isfile(os.getenv('GOOGLE_APPLICATION_CREDENTIALS')):\n CONFIG['services'].pop('DeepspeechAsrService', None)\n CONFIG['services'].pop('IbmAsrService', None)\n CONFIG['services']['GoogleAsrService'] = {\n 'language': os.environ.get(\n 'GOOGLE_APPLICATION_LANGUAGE', 'en-AU')}\n using_asr = 'Google'\n print(\"ASR ENABLED using {}\".format(using_asr))\n\n # require asr\n if not using_asr:\n print('ASR CONFIGURATION MISSING')\n sys.exit()\n\n # TTS\n if os.getenv('GOOGLE_ENABLE_TTS') == \"true\" and \\\n os.getenv('GOOGLE_APPLICATION_CREDENTIALS') and \\\n os.path.isfile(os.getenv('GOOGLE_APPLICATION_CREDENTIALS')):\n print('TTS ENABLED USING GOOGLE')\n CONFIG['services'].pop('Pico2wavTtsService', False)\n CONFIG['services']['GoogleTtsService'] = {'language': os.environ.get(\n 'GOOGLE_APPLICATION_LANGUAGE', 'en-AU'), 'cache': '/tmp/tts_cache'} # }\n else:\n CONFIG['services'].pop('GoogleTtsService', None)\n CONFIG['services']['Pico2wavTtsService'] = {\n 'binary_path': os.environ.get(\n 'TTS_BINARY',\n '/usr/bin/pico2wave'),\n 'cache_path': os.environ.get(\n 'TTS_CACHE',\n '/tmp/tts_cache')} # }\n print('TTS ENABLED USING PICO2WAV')\n\n if os.getenv('RASA_URL') and len(os.getenv('RASA_URL')) > 0:\n print('RASA ENABLED USING URL ' + os.getenv('RASA_URL'))\n rasa_service = CONFIG['services'].get('RasaService', {})\n rasa_service['rasa_server'] = os.getenv('RASA_URL')\n rasa_service['keep_listening'] = os.getenv(\n 'HERMOD_KEEP_LISTENING', 'false')\n CONFIG['services']['RasaService'] = rasa_service\n else:\n print('RASA ENABLED USING LOCAL ')\n rasa_service = CONFIG['services'].get('RasaServiceLocal', {})\n rasa_service['rasa_actions_url'] = os.getenv(\n 'RASA_ACTIONS_URL', '')\n rasa_service['keep_listening'] = os.getenv(\n 'HERMOD_KEEP_LISTENING', 'false')\n rasa_service['model_path'] = os.getenv(\n 'RASA_MODEL', '/app/rasa/models/model.tar.gz')\n CONFIG['services']['RasaServiceLocal'] = rasa_service\n\n # satellite mode restrict to audio and hotword services\n if ARGS.satellite:\n services = {\n 'AudioService': CONFIG['services']['AudioService'],\n 'PicovoiceHotwordService': CONFIG['services']['PicovoiceHotwordService']}\n CONFIG['services'] = services\n # no local audio/hotword\n if ARGS.nolocalaudio:\n if 'AudioService' in CONFIG['services']:\n del CONFIG['services']['AudioService']\n if 'PicovoiceHotwordService' in CONFIG['services']:\n del CONFIG['services']['PicovoiceHotwordService']\n\n # satellite mode\n if ARGS.satellite:\n services = {\n 'AudioService': CONFIG['services']['AudioService'],\n 'PicovoiceHotwordService': CONFIG['services']['PicovoiceHotwordService']}\n CONFIG['services'] = services\n # no local audio/hotword\n if ARGS.nolocalaudio:\n if 'AudioService' in CONFIG['services']:\n del CONFIG['services']['AudioService']\n if 'PicovoiceHotwordService' in CONFIG['services']:\n del CONFIG['services']['PicovoiceHotwordService']\n\n loop = asyncio.get_event_loop()\n # loop.set_debug(True)\n run_services = []\n for service in CONFIG['services']:\n # force dialog initialise if argument present\n full_path = os.path.join(module_dir, 'src', service + '.py')\n module_name = pathlib.Path(full_path).stem\n module = importlib.import_module(module_name)\n print(module_name)\n module_function = getattr(module, service)(CONFIG, loop)\n run_services.append(module_function.run())\n # extra event loop threads on init\n if hasattr(module_function, 'also_run'):\n for i in module_function.also_run:\n run_services.append(i())\n print('starting 
services')\n print(run_services)\n await asyncio.gather(*run_services, return_exceptions=True)", "def handle_app(self, app, **options):\n raise NotImplementedError()", "def help_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech_text = \"You can ask when the next bus is coming!\"\n\n return handler_input.response_builder.speak(speech_text).ask(\n speech_text).set_card(SimpleCard(\n \"Transit Time\", speech_text)).response", "def app(self) -> traits.RESTAware:", "def app(self) -> traits.RESTAware:", "def add_hoist(self, app: Flask, handle_errors: bool = True, auth: list = [\"\"], premade_pages: bool = True) -> Flask:\n if hasattr(app, 'HOIST_INTERNALSERVER'):\n raise HoistExistsError('hoist is already set up on app')\n\n app.HOIST_INTERNALSERVER = Server(app, handle_errors)\n\n @app.route('/hoist/send', methods=['POST'])\n def hoist_send() -> str:\n return self.get_response(app, auth, app.HOIST_INTERNALSERVER._received, 'msg')\n\n if premade_pages:\n @app.route('/hoist', methods=['POST', 'GET'])\n def hoist_home() -> str:\n if request.method == 'POST':\n return jsonify({'RESPONSE': f'Version {__version__}'})\n\n # done with html instead of flask.render_template so i dont have to touch the apps template_folder property\n \n html = HTML.replace('{{ version }}', __version__).replace('{{ serverUrl }}', request.base_url)\n\n return html\n \n\n return app", "async def async_setup(hass: HomeAssistant, config: dict):\n hass.http.register_view(IntentHandleView())\n\n await integration_platform.async_process_integration_platforms(\n hass, DOMAIN, _async_process_intent\n )\n\n hass.helpers.intent.async_register(\n intent.ServiceIntentHandler(\n intent.INTENT_TURN_ON, HA_DOMAIN, SERVICE_TURN_ON, \"Turned {} on\"\n )\n )\n hass.helpers.intent.async_register(\n intent.ServiceIntentHandler(\n intent.INTENT_TURN_OFF, HA_DOMAIN, SERVICE_TURN_OFF, \"Turned {} off\"\n )\n )\n hass.helpers.intent.async_register(\n intent.ServiceIntentHandler(\n intent.INTENT_TOGGLE, HA_DOMAIN, SERVICE_TOGGLE, \"Toggled {}\"\n )\n )\n\n return True", "async def http_client(hass, hass_client_no_auth):\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()", "def usingHandler(self, cmd):\n self.command_handler.handle_command(cmd)\n while msg_queue.empty() is False:\n self.writeresponse(msg_queue.get())", "async def add(request):\n pass", "def handler(event, context):\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def handle_intent(intent_name):\n if intent_name in name_to_handler:\n return name_to_handler[intent_name]()\n else:\n return question_answer(intent_name)", "async def echo(event):\n await event.respond(event.text)", "async def _async_process_intent(hass: HomeAssistant, domain: str, platform):\n await 
platform.async_setup_intents(hass)", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n attr = handler_input.attributes_manager.persistent_attributes\n speech_text = ''\n\n if not attr:\n # create a new one\n attr['character'] = Character().to_dict()\n handler_input.attributes_manager.persistent_attributes = attr\n\n speech_text += (\n \"Welcome to Daily Dungeon. \"\n \"Seems you don't have a character here, so I just created one for you. \")\n\n card_text='Character created successfully.'\n\n else:\n # load the char and claim trophy\n\n attr = handler_input.attributes_manager.persistent_attributes\n cur_char = Character(attr['character'])\n passing_time, loot_exp = cur_char.claim_loot()\n speech_text = 'Welcome to Daily Dungeon. '\n day = passing_time // (24 * 3600)\n hour = (passing_time % (24 * 3600)) // 3600\n minute = (passing_time % 3600) // 60\n if day > 1:\n speech_time = '{} days and {} hours'.format(day, hour)\n elif day == 1:\n speech_time = 'one day and {} hours'.format(hour)\n elif hour > 1:\n speech_time = '{} hours and {} minutes'.format(hour, minute)\n elif hour == 1:\n speech_time = 'one hour and {} minutes'.format(minute)\n else:\n speech_time = '{} minutes'.format(minute)\n\n speech_text += 'It\\'s been ' + speech_time + ' since your last login. '\n\n card_text = 'Offline time: ' + str(datetime.timedelta(seconds=passing_time)) + '\\nExp obtained:{} \\n'.format(loot_exp)\n\n if cur_char.messages:\n speech_text += 'You have unread messages. '\n card_text += 'You have unread messages. \\n'\n\n attr['character'] = cur_char.to_dict()\n\n if 'in_maze' in attr and (attr['in_maze'] == 'IN' or attr['in_maze'] == 'WAIT'):\n speech_text += 'You didnt finish your maze. Say resume the maze to go back to where you were. '\n card_text += 'You did not finish your maze. 
'\n attr['in_maze'] = 'WAIT'\n\n card = ui.SimpleCard(\n title='Welcome to Daily Dungeon',\n content=card_text\n )\n\n handler_input.attributes_manager.save_persistent_attributes()\n\n handler_input.response_builder.speak(\n speech_text).ask('what would you like to do').set_card(card)\n\n return handler_input.response_builder.response", "def start( self ):\n\t\treturn self.agi.answer().addCallbacks( self.onAnswered, self.answerFailure )", "def __init__(self, *, specified_loop=None):\n intents = discord.Intents(\n members=True,\n presences=True,\n guilds=True,\n emojis=True,\n invites=True,\n messages=True,\n reactions=True,\n voice_states=True,\n )\n loop = asyncio.get_event_loop()\n session = aiohttp.ClientSession(loop=loop)\n\n # Load all the environment variables\n load_dotenv(\"config/Bot/token.env\")\n load_dotenv(\"config/Apis/tokens.env\")\n load_dotenv(\"config/Database/db.env\")\n\n # Read the emoji file\n self.emoji_config = CustomEmojis.from_json(read_file(\"config/General/emojis.json\"))\n # Read the config file\n self.config = Config.from_json(read_file(\"config/General/config.json\"))\n\n # Set the HTTPException error codes dict to a custom property for easy access\n self.httpexception_codes = load_json(\"assets/data/httpexception_codes.json\", make_keys_int=True)\n\n # We save the bot start time to a variable\n self.started_at = datetime.datetime.utcnow()\n\n # APIs\n self.cleverbot = async_cleverbot.Cleverbot(\n os.environ[\"cleverbot\"],\n session=session,\n context=async_cleverbot.DictContext(),\n )\n self.dagpi = asyncdagpi.Client(os.environ[\"dagpi\"])\n self.google_api = async_cse.Search(os.environ[\"google_search\"], session=session)\n self.translate_api = aiogoogletrans.Translator()\n self.aki = Akinator()\n self.apis = [\"OMDB\", \"tenor\", \"owlbot\", \"gender_api\", \"nasa\"]\n self.api_keys = {api: os.environ[api.lower()] for api in self.apis}\n\n # For the snipe command\n self.snipes = {}\n\n # For tracking commands\n self.command_uses = {}\n\n # For api requests\n self.session = session\n\n super().__init__(\n command_prefix=get_prefix,\n case_insensitive=True,\n intents=intents,\n session=session,\n loop=specified_loop or loop,\n strip_after_prefix=True,\n owner_ids=self.config.owner_ids,\n )\n\n # For before_invoke\n self._before_invoke = self.before_invoke\n # For blacklisted check\n self._checks.append(self.bot_check)", "async def app_aclient(app_token):\n sender = tk.RetryingSender(sender=tk.AsyncSender())\n yield tk.Spotify(app_token, sender=sender)\n await sender.close()", "async def init_app():\n app = web.Application()\n\n # And... 
here our routes\n app.router.add_route(\n \"POST\", f\"/{ASTERISK_CALL_APP_ROUTE_ASTERISK_INIT}\", asterisk_init\n )\n app.router.add_route(\"POST\", f\"/{ASTERISK_CALL_APP_ROUTE_PLAY}\", asterisk_play)\n return app", "async def index():\n return \"Hello world\"", "def register_route(app):\n\n @app.teardown_appcontext\n def teardown_session(e):\n \"\"\"\n Exit the context of my_db and OT_spider when app's context is teared down.\n :param e: event.\n :return: None.\n \"\"\"\n my_db.close()\n OT_spider.close()\n\n @app.errorhandler(404)\n def page_not_found(e):\n \"\"\"\n Render assigned template when error code 404 occurs.\n :param e: error event.\n :return: error/404.html.\n \"\"\"\n return render_template(\"error/404.html\"), 404\n\n @app.errorhandler(403)\n def access_forbidden(e):\n \"\"\"\n Render assigned template when error code 403 occurs.\n :param e: error event.\n :return: error/403.html.\n \"\"\"\n return render_template(\"error/403.html\"), 403\n\n @app.errorhandler(500)\n def internal_server_error(e):\n \"\"\"\n Render assigned template when error code 500 occurs.\n :param e: error event.\n :return: error/500.html.\n \"\"\"\n return render_template(\"error/500.html\"), 500\n\n @app.before_request\n def filter_request():\n \"\"\"\n Intercept requests with disallowed methods and/or fake user agent.\n :return: None.\n \"\"\"\n if request.method not in ALLOWED_METHODS:\n return \"Method Not Allowed\", 405\n ua = str(request.user_agent)\n if \"Mozilla\" not in ua or \"Gecko\" not in ua:\n return \"No Scrappers!\", 403\n\n @app.after_request\n def set_res_headers(response):\n \"\"\"\n Set headers to all responses.\n :param response: flask.wrappers.Response object.\n :return: response to send back to client.\n \"\"\"\n response.headers[\"Server\"] = \"OurTieba\"\n response.headers[\"X-Content-Type-Options\"] = \"nosniff\"\n response.headers[\"X-Frame-Options\"] = \"sameorigin\"\n if app.config.get(\"ENABLE_CSP\"):\n response.headers[\"Content-Security-Policy\"] = \"script-src \" + \" \".join(WHITELIST) + \"; object-src 'self'\"\n return response\n\n @app.template_filter(\"index_format\")\n def add_zeros(i, length): # format index in photos.html\n \"\"\"\n Pad zeros to i, and turn it into a string. The length is at least 2. Used in photos.html.\n :param i: int. Integer to pad.\n :param length: int. 
Base integer.\n :return: A padded string.\n\n For example,\n add_zeros(1, 2) -> \"01\";\n add_zeros(1, 12) -> \"01\";\n add_zeros(13, 101) -> \"013\".\n \"\"\"\n return (\"{:0>\" + str(max(len(str(length)), 2)) + \"d}\").format(i)", "async def main(event):\n if conf.MATRIX_PW:\n LOGGER.info(f\"Log in {conf.MATRIX_ID=} on {conf.MATRIX_URL=}\")\n await utils.CLIENT.login(conf.MATRIX_PW)\n else:\n LOGGER.info(f\"Restoring log in {conf.MATRIX_ID=} on {conf.MATRIX_URL=}\")\n utils.CLIENT.access_token = conf.MATRIX_TOKEN\n\n server = web.Server(handler.matrix_webhook)\n runner = web.ServerRunner(server)\n await runner.setup()\n LOGGER.info(f\"Binding on {conf.SERVER_ADDRESS=}\")\n site = web.TCPSite(runner, *conf.SERVER_ADDRESS)\n await site.start()\n\n # Run until we get a shutdown request\n await event.wait()\n\n # Cleanup\n await runner.cleanup()\n await utils.CLIENT.close()", "def all_exception_handler(handler_input, exception):\n # type: (HandlerInput, Exception) -> None\n print(\"Encountered following exception: {}\".format(exception))\n\n speech = \"Mi dispiace, c'è stato un problema!!\"\n handler_input.response_builder.set_should_end_session(True)\n handler_input.response_builder.speak(speech).ask(speech)\n\n return handler_input.response_builder.response", "def fallback_handler(handler_input):\n speech_text = \"See you later! Enjoy the hackathon.\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello World\", speech_text)).set_should_end_session(\n True)\n return handler_input.response_builder.response", "def help_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n # TODO: set a speech here\n speech_text = (\n \"Here should be a help speech.\")\n reprompt = \"Here should be a help reprompt.\"\n\n handler_input.response_builder.speak(speech_text).ask(reprompt)\n return handler_input.response_builder.response", "def handler(self):\n\t\treturn self.handle_request", "async def handle(request):\n text = 'Japronto server running on {0} port. Hello, {1}'.format(\n str('??????'), str(request.match_dict['name']))\n return request.Response(text=text)", "async def _handle_request(self, request: web.Request) -> web.Response:\n event = await request.json()\n # This handler will be called on the server thread. 
Call the external\n # handler on the app thread.\n self._main_loop.call_soon_threadsafe(self.handle_event, event)\n return web.Response(text=\"OK\")", "async def main() -> None:\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n )\n logger.error(\"Starting the bot\")\n config = load_config(\"bot.ini\")\n\n if config.tg_bot.use_redis:\n storage = RedisStorage2()\n else:\n storage = MemoryStorage()\n\n pool = await create_pool(\n user=config.db.user,\n password=config.db.password,\n database=config.db.database,\n host=config.db.host,\n )\n\n bot = Bot(token=config.tg_bot.token, parse_mode=ParseMode.HTML)\n dp = Dispatcher(bot=bot, storage=storage)\n\n dp.middleware.setup(DbMiddleware(pool))\n dp.middleware.setup(RoleMiddleware(config.tg_bot.admin_id))\n dp.filters_factory.bind(RoleFilter)\n\n admin.register_admin(dp)\n user.register_user(dp)\n await set_basic_commands(dp)\n\n try:\n await dp.start_polling(allowed_updates=False)\n finally:\n await dp.storage.close()\n await dp.storage.wait_closed()\n await bot.session.close()", "def handle(req):\n return logic(req)", "async def respond(self, ctx, index, *, response):\n try:\n config = self.bot.db['questions'][str(ctx.guild.id)][str(ctx.channel.id)]\n except KeyError:\n return\n if not response:\n await hf.safe_send(ctx, \"You need to type something for your response.\")\n return\n if len(response.split()) == 1:\n try:\n msg = await ctx.channel.fetch_message(int(response))\n await ctx.message.add_reaction('⤴')\n ctx.message = msg\n ctx.author = msg.author\n response = msg.content\n except (discord.NotFound, ValueError):\n pass\n if index not in config['questions']:\n await hf.safe_send(ctx, \"Invalid question index. Make sure you're typing this command in the channel \"\n \"the question was originally made in.\")\n return\n\n try:\n log_channel = ctx.guild.get_channel(config['log_channel'])\n except discord.NotFound:\n await hf.safe_send(ctx, \"The original log channel can't be found (type `;q setup`)\")\n return\n try:\n log_message = await log_channel.fetch_message(config['questions'][index]['log_message'])\n except discord.NotFound:\n await hf.safe_send(ctx, \"The original question log message could not be found. 
Type `;q a <index>` to \"\n \"close the question and clear it.\")\n return\n\n emb: discord.Embed = log_message.embeds[0]\n value_text = f\"⁣⁣⁣\\n[Jump URL]({ctx.message.jump_url})\"\n emb.add_field(name=f\"Response by {ctx.author.name}#{ctx.author.discriminator}\",\n value=value_text.replace('⁣⁣⁣', response[:1024-len(value_text)]))\n await log_message.edit(embed=emb)\n config['questions'][index].setdefault('responses', []).append(ctx.message.jump_url)\n await self._delete_log(ctx)\n await self._post_log(ctx)\n await ctx.message.add_reaction('✅')", "def setup(hass, config):\n hass.http.register_view(APIAIWebhookView)\n return True", "def register_handler(config):\n\n @respond_to(\".*\")\n def handle(message):\n \"\"\"Respond to every Slack message and dispatch to another handler based\n on the contents of the message.\n\n This duplicates a little bit of the work that slackbot does, but allows\n us to define handlers dynamically based on the job config.\n \"\"\"\n\n text = message.body[\"text\"]\n logger.info(\"Received message\", message=text)\n\n if text == \"status\":\n handle_status(message)\n return\n\n for slack_config in config[\"slack\"]:\n if slack_config[\"regex\"].match(text):\n handle_command(message, slack_config)\n return\n\n for namespace, help_config in config[\"help\"].items():\n for pattern in [\"^{} help$\", \"^help {}$\"]:\n if re.match(pattern.format(namespace), text):\n handle_namespace_help(message, help_config)\n return\n\n include_apology = text != \"help\"\n handle_help(message, config[\"help\"], include_apology)", "def init(loop):\n tasks = JobsHandler()\n config = ConfigHandler()\n task = TaskHandler()\n\n\n\n app = web.Application(loop = loop)\n app.router.add_route('*', '/tasks/{do_something}', tasks.handle)\n app.router.add_route('*', '/config/{do_something}', config.handle)\n app.router.add_route('*', '/task/{id}/{do_something}', task.handle)\n\n handler = app.make_handler()\n srv = yield from loop.create_server(handler, '0.0.0.0', 8080)\n print(\"Server started at http://0.0.0.0:8080\")\n return srv, handler", "def alexa_handler(event, context, env_vars=None):\n\n if env_vars is None: # pragma: no cover\n env_vars = os.environ\n\n setup_logging()\n\n # If calling from a scheduled event, this is only a 'warmup' call\n if event.get('detail-type') == 'Scheduled Event':\n logging.info('Warmup only, returning early')\n return\n\n logging.debug('Event:\\n%s', json.dumps(event))\n\n latitude, longitude = get_geo_coordinates(event=event)\n response = query_dark_sky(latitude, longitude)\n weather = parse_weather(response)\n to_speak = build_text_to_speak(weather)\n\n return {\n 'version': '1.0',\n 'response': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': to_speak\n }\n }\n }", "def on_intent(event_request, session):\n print(\"=====on_intent requestId: \" + event_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = event_request['intent']\n intent_name = event_request['intent']['name']\n print(\"=====intent is: \" + intent_name)\n\n if intent_name == \"AnswerIntent\":\n print(\"=====AnswerIntent fired...\")\n if 'attributes' in session:\n if 'questions' in session['attributes']:\n return handle_answer_request(intent, session)\n\n # we probably got here because user said something other than\n # yes or no after asking if they wanted to play the game again\n print(\"=====no attributes ending game\")\n return play_end_message()\n if intent_name == \"GameIntent\":\n print(\"=====GameIntent fired...\")\n # if there's a session and we're 
in a game treat this as an answer\n # unfortunately it will be wrong but it's better than starting over\n if 'attributes' in session:\n if session['attributes']['game_status'] == \"in_progress\":\n return handle_answer_request(intent, session)\n return play_new_game(False)\n if intent_name in (\"AMAZON.StartOverIntent\", \"AMAZON.YesIntent\"):\n print(\"=====StartOverIntent or YesIntent fired...\")\n return play_new_game(True)\n if intent_name == \"AMAZON.NoIntent\":\n print(\"=====NoIntent fired...\")\n # if there's a session and we're in a game treat this as a wrong answer\n if 'attributes' in session:\n if session['attributes']['game_status'] == \"in_progress\":\n return handle_answer_request(intent, session)\n # otherwise end the game\n return play_end_message()\n if intent_name in (\"AMAZON.StopIntent\", \"AMAZON.CancelIntent\"):\n print(\"=====StopIntent or CancelIntent fired\")\n return play_end_message()\n if intent_name == 'AMAZON.HelpIntent':\n print(\"=====HelpIntent...\")\n tts = \"During the game I'll give you 6 random brain teasers and only 8 \"\\\n \"seconds to anser each one... To make your mind muscles stronger, I \"\\\n \"won't repeat any of the questions, so try to remember all the \"\\\n \"details... You can say 'Start Over' if you'd like a new game, \"\\\n \"or make your guess for the last question...\"\n return speech(tts, session['attributes'], False, None)", "async def post(self, request, data):\n hass = request.app[\"hass\"]\n\n try:\n intent_name = data[\"name\"]\n slots = {\n key: {\"value\": value} for key, value in data.get(\"data\", {}).items()\n }\n intent_result = await intent.async_handle(\n hass, DOMAIN, intent_name, slots, \"\", self.context(request)\n )\n except intent.IntentHandleError as err:\n intent_result = intent.IntentResponse()\n intent_result.async_set_speech(str(err))\n\n if intent_result is None:\n intent_result = intent.IntentResponse()\n intent_result.async_set_speech(\"Sorry, I couldn't handle that\")\n\n return self.json(intent_result)", "async def ask(self, ctx: commands.Context, *, question: str):\n # Check for cooldown\n await self.check_cooldown(ctx)\n\n # Create question context and contact API\n context = contexts.create_question_context(self.bot.config.data_path, question, self.bot.user.display_name)\n async with ctx.typing():\n result = await utils.create_completion_result_from_context(self.bot.loop, context)\n await ctx.send(result)", "def on_intent(request, session):\n\n intent_name = request['intent']['name']\n \n # process the intents\n if intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n \n elif intent_name == \"AMAZON.StopIntent\":\n return get_stop_response()\n \n elif intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response()\n \n elif intent_name == \"AMAZON.FallbackIntent\":\n return get_fallback_response()\n \n elif intent_name == \"recognizeDates\":\n slots = request['intent']['slots']\n date_start_slot = slots.get('dateStart',{'value':'NA'}).get('value','NA')\n date_end_slot = slots.get('dateEnd',{'value':'NA'}).get('value','NA')\n\n return get_intent_response(date_start_slot,date_end_slot)\n \n elif intent_name == \"PollHprofs\":\n slots = request['intent'].get('slots','')\n print(slots)\n speechOutput = \"Under development\"\n return response(speech_response(speechOutput, True))\n\n elif intent_name == \"SpinVMs\":\n slots = request['intent'].get('slots','')\n print(slots)\n speechOutput = \"Under development\"\n return response(speech_response(speechOutput, True))\n\n else:\n print(\"For 
invalid Intents reply with help\")\n return get_help_response()", "async def fancysay(self, ctx):", "def init_bot(self):\n dispatcher = self.updater.dispatcher\n\n dispatcher.add_handler(CommandHandler(\"start\", self.on_bot_start))\n dispatcher.add_handler(CommandHandler(\"help\", self.on_bot_help))\n dispatcher.add_handler(CommandHandler(\"about\", self.on_bot_about))\n dispatcher.add_handler(CommandHandler(\"vreausaajut\", self.on_bot_offer_to_help))\n dispatcher.add_handler(CommandHandler(\"status\", self.on_status))\n dispatcher.add_handler(CommandHandler(\"Da\", self.on_accept))\n dispatcher.add_handler(CommandHandler(\"Nu\", self.on_reject))\n\n dispatcher.add_handler(CallbackQueryHandler(self.negotiate_time, pattern=\"^eta.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_dispatch, pattern=\"^caution.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_handle, pattern=\"^handle.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_wellbeing, pattern=\"^state.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_symptom, pattern=\"^symptom.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_wouldyou, pattern=\"^wouldyou.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_further, pattern=\"^further.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_activities, pattern=\"^assist.*\"))\n\n dispatcher.add_handler(MessageHandler(Filters.photo, self.on_photo))\n dispatcher.add_handler(MessageHandler(Filters.contact, self.on_contact))\n dispatcher.add_handler(MessageHandler(Filters.text, self.on_text_message))\n dispatcher.add_error_handler(self.on_bot_error)", "def main():\n\n async def shutdown():\n server.stop()\n await tornado.gen.sleep(_SHUTDOWN_TIMEOUT)\n tornado.ioloop.IOLoop.current().stop()\n LOGGER.info(\"Server was successfully shut down.\")\n\n def exit_handler(sig, frame): # pylint: disable=unused-argument\n def get_sig_name(sig):\n return dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))\n if v.startswith('SIG') and not v.startswith('SIG_')).pop(sig)\n\n LOGGER.warning(\"Registered %s, shutting down.\", get_sig_name(sig))\n tornado.ioloop.IOLoop.instance().add_callback_from_signal(shutdown)\n\n signal.signal(signal.SIGTERM, exit_handler)\n signal.signal(signal.SIGINT, exit_handler)\n\n init_logging()\n cryptochat_db = DB(DATABASE_LOCATION)\n\n cryptochat_app = Application()\n server = tornado.httpserver.HTTPServer(cryptochat_app)\n server.bind(PUBLIC_API_PORT)\n server.start()\n LOGGER.info(\"Starting cryptochat (version %s).\", SERVER_VERSION)\n\n BaseHandler.messages_new_api = MessagesNewAPI(cryptochat_db)\n BaseHandler.messages_updates_api = MessagesUpdatesAPI(cryptochat_db)\n BaseHandler.users_api = UsersAPI(cryptochat_db)\n BaseHandler.chats_api = ChatsAPI(cryptochat_db)\n BaseHandler.chats_user_api = ChatsUserAPI(cryptochat_db)\n BaseHandler.contacts_new_api = ContactsAPI(cryptochat_db)\n\n tornado.ioloop.IOLoop.current().start()", "def index():\n return Response(\n \"Welcome to basic-http-server, you're ready to add some methods!\\n\" +\n str(request) + \"\\n\", mimetype='text/plain'\n )", "def launch_request_handler(handler_input):\n return launch_request(handler_input)", "def on_post(self, req, resp):\n # A map of supported actions to the handlers for tasks for those actions\n supported_actions = {\n 'validate_design': TasksResource.task_validate_design,\n 'verify_site': TasksResource.task_verify_site,\n 'prepare_site': TasksResource.task_prepare_site,\n 
'verify_nodes': TasksResource.task_verify_nodes,\n 'prepare_nodes': TasksResource.task_prepare_nodes,\n 'deploy_nodes': TasksResource.task_deploy_nodes,\n 'destroy_nodes': TasksResource.task_destroy_nodes,\n 'relabel_nodes': TasksResource.task_relabel_nodes,\n }\n\n try:\n json_data = self.req_json(req)\n\n action = json_data.get('action', None)\n if supported_actions.get(action, None) is None:\n self.error(req.context, \"Unsupported action %s\" % action)\n self.return_error(resp,\n falcon.HTTP_400,\n message=\"Unsupported action %s\" % action,\n retry=False)\n else:\n supported_actions.get(action)(self, req, resp, json_data)\n except Exception as ex:\n self.error(\n req.context,\n \"Unknown error: %s\\n%s\" % (str(ex), traceback.format_exc()))\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Unknown error\",\n retry=False)", "def main() -> None:\n try:\n config = Config.load_config()\n asyncio.run(App(config=config, no_history=False).run())\n except ClientError:\n raise\n except Exception as e:\n raise Bug(str(e))", "def main():\n\n updater = Updater(token=telegram_token)\n\n # Get the dispatcher to register handlers\n dispatch = updater.dispatcher\n\n # on different commands - answer in Telegram\n start_handler = CommandHandler('start', start)\n dispatch.add_handler(start_handler)\n\n location_handler = MessageHandler(Filters.location, location)\n dispatch.add_handler(location_handler)\n\n respond_handler = MessageHandler(Filters.text, respond)\n dispatch.add_handler(respond_handler)\n\n help_handler = CommandHandler('help', help)\n dispatch.add_handler(help_handler)\n\n find_handler = CommandHandler('find', findlocation, pass_args=True)\n dispatch.add_handler(find_handler)\n\n unknown_handler = MessageHandler(Filters.command, unknown)\n dispatch.add_handler(unknown_handler)\n\n # log all errors\n dispatch.add_error_handler(error)\n\n #PROD\n port_number = int(os.environ.get('PORT', '5000'))\n updater.start_webhook(listen=\"0.0.0.0\",\n port=port_number,\n url_path=telegram_token)\n updater.bot.setWebhook(\"https://dashproject.herokuapp.com/\" + telegram_token)\n updater.idle()", "def setup(cls):\n super().setup()\n cls.http_handler = cast(\n HttpHandler, cls._skill.skill_context.handlers.http_handler\n )\n cls.logger = cls._skill.skill_context.logger\n\n cls.http_dialogues = cast(\n HttpDialogues, cls._skill.skill_context.http_dialogues\n )\n\n cls.get_method = \"get\"\n cls.post_method = \"post\"\n cls.url = \"some_url\"\n cls.version = \"some_version\"\n cls.headers = \"some_headers\"\n cls.body = b\"some_body\"\n cls.sender = \"fetchai/some_skill:0.1.0\"\n cls.skill_id = str(cls._skill.skill_context.skill_id)\n\n cls.status_code = 100\n cls.status_text = \"some_status_text\"\n\n cls.content = b\"some_content\"\n cls.list_of_messages = (\n DialogueMessage(\n HttpMessage.Performative.REQUEST,\n {\n \"method\": cls.get_method,\n \"url\": cls.url,\n \"version\": cls.version,\n \"headers\": cls.headers,\n \"body\": cls.body,\n },\n ),\n )", "async def test_intent(self, dm):\n request = create_request(\"other\", \"intent\")\n result = await dm.apply_handler(request, create_responder(request))\n assert result.dialogue_state == \"intent\"", "def handle_answer_request(intent, session):\n\n eins_list = [\"eins\", \"ein\", \"einer\", \"eine\", \"einen\", \"eines\", \"einem\"]\n \n if intent[\"name\"] == \"DontKnowIntent\":\n answer = \"weiß nicht\"\n elif \"Nummer\" in intent[\"slots\"].keys() and \"value\" in intent[\"slots\"][\"Nummer\"]:\n answer = 
intent[\"slots\"][\"Nummer\"][\"value\"]\n elif \"Antworten\" in intent[\"slots\"].keys() and \"value\" in intent[\"slots\"][\"Antworten\"]:\n answer = intent[\"slots\"][\"Antworten\"][\"value\"]\n else:\n answer = \"Fehler\"\n \n #Necessary to recognize \"1\":\n if answer in eins_list:\n answer = \"1\"\n elif answer == \"ein mal\":\n answer = \"einmal\"\n answer = answer.lower()\n\n print(\"handle_answer_request: \", intent, \"answer: \", answer)\n\n if \"attributes\" not in session:\n return start_game(answer, session)\n elif session[\"attributes\"][\"state\"] == \"Gameon\":\n return check_answer(answer, session)\n elif session[\"attributes\"][\"state\"] == \"Start\":\n return start_game(answer, session)\n\n return start_game(answer, session)", "def main():\n # load dotenv as environment variable\n dotenv_path = join(dirname(__file__), '.env')\n load_dotenv(dotenv_path)\n # Create the Updater and pass it your bot's token.\n # getting bot api token from environment variable\n updater = Updater(os.environ.get(\"BOT_API_TOKEN\"), use_context=True)\n\n # creating the ConversationHandler with entrypoints and multiple stages\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n states={\n FIRST: [CallbackQueryHandler(show_categories, pattern='^' + str(ONE) + '$'),\n CallbackQueryHandler(cart_inline_keyboard, pattern='^' + str(TWO) + '$'),\n CallbackQueryHandler(show_cart, pattern='^' + str(THREE) + '$'),\n CallbackQueryHandler(show_group_cart, pattern='^' + str(FOUR) + '$'),\n CallbackQueryHandler(finish_question, pattern='^' + str(FIVE) + '$'),\n CallbackQueryHandler(clear_all_question, pattern='^' + str(SIX) + '$')],\n SECOND: [CallbackQueryHandler(show_category)],\n THIRD: [CallbackQueryHandler(add_to_cart)],\n FOURTH: [CallbackQueryHandler(remove_from_cart)],\n FIFTH: [CallbackQueryHandler(clear_all)],\n SIXTH: [CallbackQueryHandler(finish)],\n SEVENTH: [CallbackQueryHandler(start_over, pattern='^' + str(ONE) + '$'),\n CallbackQueryHandler(show_categories, pattern='^' + str(TWO) + '$'),\n CallbackQueryHandler(cart_inline_keyboard, pattern='^' + str(THREE) + '$')]\n },\n fallbacks=[CommandHandler('start', start)]\n )\n\n # adding handlers to the bot\n updater.dispatcher.add_handler(conv_handler)\n updater.dispatcher.add_handler(CommandHandler('help', help))\n updater.dispatcher.add_error_handler(error)\n\n # Start the Bot\n updater.start_polling()\n\n # Run the bot until the user presses Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT\n updater.idle()", "def main():\n\n addon_url = sys.argv[0]\n addon_handle = int(sys.argv[1])\n addon_args = urlparse.parse_qs(sys.argv[2][1:])\n\n # Route request to action.\n Plugin(addon_url, addon_handle, addon_args).route()", "def all_exception_handler(handler_input, exception):\n # type: (HandlerInput, Exception) -> Response\n logger.error(exception, exc_info=True)\n\n speech = \"Sorry, there was some problem. 
Please try again!!\"\n handler_input.response_builder.speak(speech).ask(speech)\n\n return handler_input.response_builder.response", "def async_setup(hass, config):\n warnings.filterwarnings('ignore', module='fuzzywuzzy')\n\n config = config.get(DOMAIN, {})\n intents = hass.data.get(DOMAIN)\n\n if intents is None:\n intents = hass.data[DOMAIN] = {}\n\n for intent_type, utterances in config.get('intents', {}).items():\n conf = intents.get(intent_type)\n\n if conf is None:\n conf = intents[intent_type] = []\n\n conf.extend(_create_matcher(utterance) for utterance in utterances)\n\n @asyncio.coroutine\n def process(service):\n \"\"\"Parse text into commands.\"\"\"\n text = service.data[ATTR_TEXT]\n yield from _process(hass, text)\n\n hass.services.async_register(\n DOMAIN, SERVICE_PROCESS, process, schema=SERVICE_PROCESS_SCHEMA)\n\n hass.http.register_view(ConversationProcessView)\n\n hass.helpers.intent.async_register(TurnOnIntent())\n hass.helpers.intent.async_register(TurnOffIntent())\n async_register(hass, INTENT_TURN_ON,\n ['Turn {name} on', 'Turn on {name}'])\n async_register(hass, INTENT_TURN_OFF, [\n 'Turn {name} off', 'Turn off {name}'])\n\n return True", "def on_event():\n\n event = request.get_json()\n \n token_status, token_text = validate_token()\n\n if token_status != 0:\n return json.jsonify({'text': token_text})\n\n if event['type'] == 'ADDED_TO_SPACE' and event['space']['type'] == 'ROOM':\n text = 'Thanks for adding me to \"%s\"! For help type @bot help' % event['space']['displayName']\n \n elif event['type'] == 'MESSAGE':\n\n room_name = event['space']['name'].split('/')[1]\n commands = ['list', 'add', 'remove', 'help']\n\n try:\n param = event['message']['text'].split()[1:][0]\n except:\n text = _help()\n return json.jsonify({'text': text})\n\n if param in commands:\n\n if param == 'list':\n text = _list(room_name)\n\n elif param == 'add':\n text = _add(event, room_name)\n\n elif param == 'remove':\n text = _remove(event, room_name)\n\n elif param == 'help':\n text = _help()\n return json.jsonify({'text': text})\n \n else:\n text = send_msg(event, room_name)\n\n else:\n return\n \n return json.jsonify({'text': text})", "def handle(self, handler_input):\n speech = \"I'm a sample Alexa Skill. Let me give you a random Chuck Norris Fact. \"\n speech += getChuckFact()\n speech += \". Do you want more awesome Chuck facts?\"\n \n \"\"\"\n Take note of the set_should_end_session. If set to 'True', the alexa\n skill will gracefully end execution.AbstractExceptionHandler\n \n The set_card method specifies what kind of cards do you want to use when\n interacting with the user via display. 
A 'SimpleCard' display's text.\n \n For more info about cards, see:\n https://developer.amazon.com/docs/custom-skills/include-a-card-in-your-skills-response.html\n \"\"\"\n handler_input.response_builder.speak(speech).set_card(\n SimpleCard(speech)).set_should_end_session(False)\n return handler_input.response_builder.response", "async def introduce_myself():\n return INTRODUCTION_FORM", "def main(app_handlers=None, app_settings=None, use_curl=False):\n app_handlers = app_handlers or []\n app_settings = app_settings or {}\n api = BiothingsAPI(options.conf)\n\n if app_settings:\n api.settings.update(app_settings)\n if app_handlers:\n api.handlers = app_handlers\n if use_curl:\n api.use_curl()\n\n api.host = options.address\n api.update(debug=options.debug)\n api.update(autoreload=options.autoreload)\n api.start(options.port)", "def init_app(self, app):\n # Avoid double initialization\n if self._tornado_app is app:\n return None\n if self._tornado_app is not None:\n raise RuntimeError(\n \"This API has already been registered on a tornado application.\"\n )\n\n self._tornado_app = app\n app.settings[\"jsonapi\"] = self\n\n # Add the handler.\n url_rule = tornado.web.url(\n self.uri + \"/.*\", Handler, dict(jsonapi=self), name=\"jsonapi\"\n )\n app.add_handlers(\".*\", [url_rule])\n return None", "async def _bot():\n await bot.say('Yes, the bot is cool.')", "def on_get(self, req, resp, task_id):\n task_result = AsyncResult(task_id)\n result = {'status': task_result.status, 'result': task_result.result}\n resp.status = falcon.HTTP_200\n resp.body = json.dumps(result)", "def query_handler(self, handler_name=\"\"):\n\t\treturn False", "def handler(event, context):\n return awsgi.response(app, event, context)", "def __luanch_handlers(self):\n\n self.__updater = Updater(self.__token, use_context=True)\n self.__dp = self.__updater.dispatcher\n # on different commands - answer in Telegram\n self.__dp.add_handler(CommandHandler(\"start\", self.start_message))\n self.__dp.add_handler(CommandHandler(\"help\", self.help))\n self.__dp.add_handler(CommandHandler(\"history\", self.history))\n self.__dp.add_handler(CommandHandler(\"request\", self.request))\n self.__dp.add_handler(CommandHandler(\"cancel\", self.cancel))\n self.__dp.add_handler(CommandHandler(\"show\", self.show))\n self.__dp.add_handler(CommandHandler(\"promote\", self.promote))\n self.__dp.add_handler(CommandHandler(\"demote\", self.demote))\n self.__dp.add_handler(CommandHandler(\"checkadmin\", self.check_admin))\n self.__dp.add_handler(CommandHandler(\"kick\", self.kick))\n self.__dp.add_handler(CommandHandler(\"stop\", self.stop_all))\n self.__dp.add_handler(CommandHandler(\"whatsmyid\", self.__whatsmyid))\n self.__updater.start_polling()", "def fallback_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = (\n \"The {} skill can't help you with that. \"\n \"I can look up a word in the dictionary for you\").format(skill_name)\n reprompt = (\"I can look up a word in the dictionary, \"\n \"Just say any word in English\")\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response" ]
[ "0.5878627", "0.5799518", "0.57429504", "0.57207054", "0.5717911", "0.5691735", "0.5681638", "0.5614569", "0.55652535", "0.5559621", "0.5534234", "0.5512763", "0.5455151", "0.5423512", "0.5397953", "0.5394697", "0.5374808", "0.5370889", "0.53686166", "0.53405356", "0.5336915", "0.5332253", "0.53307897", "0.53307897", "0.53307897", "0.53082174", "0.53078187", "0.52974576", "0.5297153", "0.52573425", "0.5254196", "0.52334255", "0.52308315", "0.52286583", "0.52229136", "0.5206533", "0.5205122", "0.5192848", "0.518281", "0.518281", "0.51571274", "0.51386327", "0.513401", "0.51219285", "0.5121574", "0.5113948", "0.51076263", "0.5089747", "0.5086974", "0.50753355", "0.5065912", "0.50646913", "0.5057889", "0.5056401", "0.5053688", "0.50524837", "0.50509423", "0.5042865", "0.5028519", "0.5024675", "0.50195503", "0.50183386", "0.50154954", "0.50037235", "0.5003393", "0.49947083", "0.4993058", "0.49913883", "0.4989107", "0.49838713", "0.4982458", "0.49755946", "0.49710572", "0.4971051", "0.49684104", "0.49647772", "0.49640697", "0.4959243", "0.49572003", "0.49563402", "0.495472", "0.4947828", "0.49470133", "0.49428767", "0.49428746", "0.49370077", "0.49336466", "0.4929395", "0.4923418", "0.4922778", "0.4919624", "0.49169612", "0.49085975", "0.49083695", "0.49017686", "0.48955244", "0.48951855", "0.489334", "0.48910445", "0.487837" ]
0.6682835
0
Start the application and connect to the server
def connect(self):
    Log.info(f'Connecting to Kodeventure server at {SERVER_HOST}')

    web.run_app(
        self.aiohttp,
        host=PLAYER_HOST,
        port=PLAYER_PORT,
        ssl_context=self.cert
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])", "def start_server(self):\n app.run(host=str(self.__constants.host),\n port=int(self.__constants.port),\n debug=bool(self.__constants.runindebug))", "def start(self):\n run(self.app, host=self.host, port=self.port, server=AsyncServer,\n quiet=True, debug=False)", "def connect_to_server(self):\n\t\tself.outside.start()\n\t\tself.outside.register(self.config.server_ip, self.config.server_port)\n\n\t\tself.thin.start()\n\t\tself.thin.register(self.config.server_ip, self.config.server_port)", "def start():\n\n start_server()", "def start() -> None:\n from app import app\n app.run(debug = True, host = HOST, port = PORT)", "def run():\r\n log.debug('Starter::run()')\r\n try:\r\n # check specified port\r\n if not conf.port:\r\n raise Exception(\"Please specify port number! (use --port)\")\r\n Server(conf.port).run()\r\n except Exception as E:\r\n log.critical(E)", "def start(self):\n self.serve_forever()", "def start(self):\n self.serve_forever()", "def run(self):\n self._server = self._get_server()\n self._server.serve_forever()", "def run(self):\n self.app.run(host=\"0.0.0.0\")", "def main():\n s = start_server()\n accept_connection(s)", "def run_server():\n app = init_app()\n app.run(host=app.config['HOST'], port=app.config['PORT'])", "def start(self):\n self.port = self.conn.evalInServer(server_code.format(key=self.key))", "def run(self, **options):\n try:\n application = App()\n application.listen(self.port, self.addr)\n tornado.ioloop.IOLoop.current().start()\n except Exception, e:\n self.stdout.write('run error. Detail error info as follows: %s' % e)", "def run_server(self, _):\n if not ENABLE_SERVER:\n logger.info('server not enabled, exit')\n return\n app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)", "def run(self):\n self.connect()", "def start(self):\n if config['port'] or config['host']:\n port = config['port'] or 5222\n host = config['host'] or sleekxmpp.JID(config['jid']).host\n addr = (host, port)\n else:\n addr = tuple()\n self.connect(addr)\n self.process(threaded=True)", "def start(self):\n\n self.app = Application()\n self.app._loop = self.loop\n self.add_routes()\n self.app.run(port=int(self.port),\n worker_num=None,\n reload=False,\n debug=False)\n # GZip support\n # Compress(self.app)\n # self.app.config['COMPRESS_MIMETYPES'] = {'text/html',\n # 'application/json'}\n # self.app.config['COMPRESS_LEVEL'] = 4\n # self.app.config['COMPRESS_MIN_SIZE'] = 300\n # Session support\n # self.session_interface = InMemorySessionInterface()\n # self.app.response_middleware.appendleft(self.save_session)\n # self.app.request_middleware.append(self.add_session_to_request)\n\n # self.add_routes()\n # return await self.app.create_server(loop=self.loop,\n # host='0.0.0.0',\n # port=self.port,\n # debug=False)", "def startapp():", "def start(self):\n #url = '{}://{}:{}/'.format('http',\n # self.ip,\n # self.port)\n #self.service_info = ServiceInfo(\n # '_webthing._sub._http._tcp.local.',\n # '{}._http._tcp.local.'.format(self.name),\n # address=socket.inet_aton(self.ip),\n # port=self.port,\n # properties={\n # 'url': url,\n # },\n # server='{}.local.'.format(socket.gethostname()))\n #self.zeroconf = Zeroconf()\n #self.zeroconf.register_service(self.service_info)\n\n # If WebSocketS used and NOT running in thread, and WebServer IS\n # running in thread make shure WebServer has enough stack size to\n # handle also the WebSocket requests.\n log.info('Starting Web Server')\n 
self.server.Start(threaded=srv_run_in_thread, stackSize=8192)", "def __run_server(self):\n os.chdir(os.path.dirname(self.server_path))\n self.server_process = subprocess.Popen([self.server_path, \\\n \"{}:{}\".format(self.args.ip, self.args.port)])", "def main():\n\n global _CLIENT\n\n logging.basicConfig(level=logging.DEBUG)\n app.logger.setLevel(logging.INFO)\n\n _CLIENT = Client('192.168.0.120', 443, 'root', 'calvin')\n _CLIENT.connect()\n\n\n app.run(debug=True)", "def start_server():\n server.bind(constants.ADDRESS)\n server.listen()\n print(\"Server listening on: \" + constants.HOST + \" on port \" + str(constants.PORT) + \"...\")", "def start(self) -> None:\n app = web.Application()\n app.add_routes([web.post(\"/\", self._handle_request)])\n self._runner = web.AppRunner(app)\n\n self._startup_event = threading.Event()\n self._server_loop = asyncio.new_event_loop()\n t = threading.Thread(target=self._run)\n t.start()\n\n # Wait for server to startup\n self._startup_event.wait()", "def connect(self):\n self.start()", "def start(self):\n # create socket\n try:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # 10 minutes for timeout\n self._socket.settimeout(600)\n except socket.error as msg:\n logging.error(\"Can't create socket. Error code: {}, msg: {}\".format(*msg))\n raise\n\n # Open TCP connection\n try:\n self._socket.connect(self.address)\n except socket.error:\n logging.error(\"Can't connect to the server on {}:{}\".format(*self.address))\n raise", "def start(self):\n self.conn.start()", "def run():\n app = Application()\n #app.sentry_client = AsyncSentryClient(app.settings['sentry_url'])\n http_server = HTTPServer(app, xheaders=True)\n http_server.listen(options.port)\n print('Running on port %d' % options.port)", "def start(self):\n assert(self._cbs is not None)\n self._as.start() # start the server", "def run(self):\n self.connect()\n self.run_forever()", "def start(self) -> None:\n if self.bolt_app.logger.level > logging.INFO:\n print(get_boot_message())\n else:\n self.bolt_app.logger.info(get_boot_message())\n\n web.run_app(self.web_app, host=\"0.0.0.0\", port=self.port)", "def webserver_start():\n run(_webserver_command())", "async def _start_server(self) -> None:\n # First, figure out what address to listen on. Open a connection to\n # the Hubitat hub and see what address it used. 
This assumes this\n # machine and the Hubitat hub are on the same network.\n with _open_socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.connect((self.host, 80))\n address = s.getsockname()[0]\n\n self._server = server.create_server(\n self._process_event, address, self.port or 0, self.ssl_context\n )\n self._server.start()\n _LOGGER.debug(\n \"Listening on %s:%d with SSL %s\",\n address,\n self._server.port,\n \"disabled\" if self.ssl_context is None else \"enabled\",\n )\n\n await self.set_event_url(self.event_url)", "def run(self):\n self.__server.serve_forever()", "def start():\n app.run()", "def start(self):\n\n self.app.go()", "def run():\n\n # Construct a server.\n server = wsgiref.simple_server.make_server(\n _config[ 'address' ],\n _config[ 'port' ],\n application\n )\n\n # Run the server.\n server.serve_forever()\n\n # Return result.\n return 0", "def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()", "async def start(self):\n server = await asyncio.start_server(\n self.handle_request, self.host, self.port)\n\n addr = server.sockets[0].getsockname()\n print(f'Serving on {addr}')\n\n async with server:\n await server.serve_forever()", "def runserver():\n\tapp.run(host = '0.0.0.0', port = 5000)", "def cmd_start(self, app_name=None):\n rc = self.socket_command_with_project('start', app_name)\n return rc", "def runserver():\n app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG, threaded=config.THREADED)", "def connect(self):\n self.ipv4 = socket.gethostbyname(socket.gethostname())\n self.addr = (self.ipv4, HttpServer.PORT)\n self.server.bind(self.addr)\n print(\"[SETUP] server bound to IPv4 address\", self.ipv4, \"on port\", HttpServer.PORT)\n self.server.listen()\n print(\"[SETUP] server listening for connections\")", "def run(self, **kwargs):\n app = self.create_app()\n\n app.run(host=self.host, port=self.port, **kwargs)", "def start():\n # Import any local level utilities that may be used\n # before the web-server is initialized.\n from django.core.management import call_command\n from db.models import ApplicationState\n from db.utilities import generate_models\n\n # Run the migrate command within django.\n # Making sure our models are upto date.\n call_command(command_name=\"migrate\", app=\"titandash\")\n\n # Server is being started, it is safe for us\n # to update our active flag.\n ApplicationState.objects.set(state=True)\n\n # Generate any initial models that we expect\n # to be available by default.\n 
generate_models()\n\n _url = EEL_DASHBOARD if User.objects.valid() else EEL_LOGIN\n\n logger.info(\"starting titandash application with options: '{options}'\".format(options={\"path\": _url, **EEL_START_OPTIONS}))\n # Start eel, providing our start url defined above, the close callback\n # to deal with cleanup functionality, and default options.\n eel.start(_url, close_callback=close_callback, **EEL_START_OPTIONS)", "def startserver(path):\n global urlpath\n urlpath = path\n app.run(debug=True, host='0.0.0.0', port=4444)", "def start_server(self):\n self.logger.info(\"Starting WebSocket server on port %d\" % self.port)\n http_server = Thread(target=tornado.ioloop.IOLoop.instance().start)\n http_server.start()", "def run(self):\n self._connection = self.connect()\n self._connection.ioloop.start()", "def run(self):\n self._connection = self.connect()\n self._connection.ioloop.start()", "def run(self, host='127.0.0.1', port=5000):\n global _host, _port\n\n _port = port\n _host = host\n return self.socket.run(self.app, host, port)", "def start(parse_opts):\n global opts\n opts = parse_opts\n app.run(host='0.0.0.0')", "def main():\n print(\"Starting python server...\")\n\n # Set address to localhost\n address = \"tcp://127.0.0.1:\" + parse_port()\n\n # Start server with class API as \n server = zerorpc.Server(API.API())\n server.bind(address)\n\n print(\"Server started running on {}\".format(address))\n\n # Blocking command. Keeps server running\n server.run()", "def connect_to_server(self):\n \n server.setserver(\n ip=self.config.get('Network', 'ip'),\n port=self.config.getint('Network', 'port')\n )\n self._gui_server = server.connect(self)", "def run():\n server = current_server()\n server._auto_stop = True\n return start()", "def start():\n server = current_server()\n logger.info('Starting Flexx event loop.')\n server.start()", "def _StartServer( self ):\n with self._gocode_lock:\n _logger.info( 'Starting Gocode server' )\n\n self._gocode_port = utils.GetUnusedLocalhostPort()\n self._gocode_host = '127.0.0.1:{0}'.format( self._gocode_port )\n\n command = [ self._gocode_binary_path,\n '-s',\n '-sock', 'tcp',\n '-addr', self._gocode_host ]\n\n if _logger.isEnabledFor( logging.DEBUG ):\n command.append( '-debug' )\n\n self._gocode_stdout = utils.CreateLogfile(\n LOGFILE_FORMAT.format( port = self._gocode_port, std = 'stdout' ) )\n self._gocode_stderr = utils.CreateLogfile(\n LOGFILE_FORMAT.format( port = self._gocode_port, std = 'stderr' ) )\n\n with utils.OpenForStdHandle( self._gocode_stdout ) as stdout:\n with utils.OpenForStdHandle( self._gocode_stderr ) as stderr:\n self._gocode_handle = utils.SafePopen( command,\n stdout = stdout,\n stderr = stderr )", "def start(self):\n if self._params.arq:\n s = udpsocket.UDPSocket()\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n s.bind(('', self._params.port))\n s.listen(5)\n self.check_and_print_debug_message(\n 'HTTP Server is running on port: ' + str(self._params.port))\n while True:\n c, addr = s.accept()\n Thread(\n target=self.process_http_request,\n args=(c, addr)\n ).start()\n except Exception as e:\n self.check_and_print_debug_message(str(e))\n s.close()\n finally:\n self.check_and_print_debug_message(\n \"HTTP Server connection is closed.\")\n s.close()", "def start_server(self):\n if self.esp_mgr.ap:\n self.server_socket = adafruit_esp32spi_socket.socket()\n self.esp_mgr.esp.start_server(23, self.server_socket.socknum)", "def start_server(self):\n if not 
self._server:", "def main(self):\n self.parse_option()\n self.set_option()\n\n r = Bootscripts()\n reactor.listenTCP(8009, server.Site(r))\n reactor.run()", "def run(self):\n run_simple(self.hostname, self.port, self.dispatch,\n use_reloader=self.debug)", "def startServer(self):\n processor = ThinService.Processor(self.serverLogic)\n serverSocket = TSocket.TServerSocket(Constants.SERVER_HOST, Constants.SERVER_PORT)\n transportFactory = TTransport.TBufferedTransportFactory()\n protocolFactory = TBinaryProtocol.TBinaryProtocolFactory()\n\n server = TServer.TSimpleServer(processor, serverSocket, transportFactory, protocolFactory)\n server.serve()", "def run():\n register_component(\"press\")\n run_app(host=\"0.0.0.0\", port=8080, debug=True, workers=os.cpu_count())", "def server():", "def server():", "def start_server(self, app, **kwargs):\n\n # start server with app and pass Dash arguments\n self.server(app, **kwargs)\n\n # set the default server_url, it implicitly call wait_for_page\n self.server_url = self.server.url", "def run(self, host=\"0.0.0.0\", port=8080):\n self.app.run(host=host, port=port, debug=True, use_reloader=False,\n use_evalex=False)", "def main():\n server = ThreadedServer(MasterControllerService, port=5000)\n server.start()", "def run_server(self):\n self.establish_connection()\n while True:\n self.receive_data(self.conn)", "def run():\n app.run(debug=True, port=5001)", "def main():\n return run_server(**parse_server_args())", "def start(self):\n if self._use_ssl:\n try:\n ca_file = CONF.ssl_ca_file\n cert_file = CONF.ssl_cert_file\n key_file = CONF.ssl_key_file\n\n if cert_file and not os.path.exists(cert_file):\n raise RuntimeError(\n _(\"Unable to find cert_file : %s\") % cert_file)\n\n if ca_file and not os.path.exists(ca_file):\n raise RuntimeError(\n _(\"Unable to find ca_file : %s\") % ca_file)\n\n if key_file and not os.path.exists(key_file):\n raise RuntimeError(\n _(\"Unable to find key_file : %s\") % key_file)\n\n if self._use_ssl and (not cert_file or not key_file):\n raise RuntimeError(\n _(\"When running server in SSL mode, you must \"\n \"specify both a cert_file and key_file \"\n \"option value in your configuration file\"))\n ssl_kwargs = {\n 'server_side': True,\n 'certfile': cert_file,\n 'keyfile': key_file,\n 'cert_reqs': ssl.CERT_NONE,\n }\n\n if CONF.ssl_ca_file:\n ssl_kwargs['ca_certs'] = ca_file\n ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED\n\n self._socket = eventlet.wrap_ssl(self._socket,\n **ssl_kwargs)\n\n self._socket.setsockopt(socket.SOL_SOCKET,\n socket.SO_REUSEADDR, 1)\n # sockets can hang around forever without keepalive\n self._socket.setsockopt(socket.SOL_SOCKET,\n socket.SO_KEEPALIVE, 1)\n\n # This option isn't available in the OS X version of eventlet\n if hasattr(socket, 'TCP_KEEPIDLE'):\n self._socket.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPIDLE,\n CONF.tcp_keepidle)\n\n except Exception:\n with excutils.save_and_reraise_exception():\n LOG.error(_(\"Failed to start %(name)s on %(host)s\"\n \":%(port)s with SSL support\") % self.__dict__)\n\n wsgi_kwargs = {\n 'func': eventlet.wsgi.server,\n 'sock': self._socket,\n 'site': self.app,\n 'protocol': self._protocol,\n 'custom_pool': self._pool,\n 'log': self._wsgi_logger,\n 'log_format': CONF.wsgi_log_format\n }\n\n if self._max_url_len:\n wsgi_kwargs['url_length_limit'] = self._max_url_len\n\n self._server = eventlet.spawn(**wsgi_kwargs)", "def start(self):\n\n self.socket.bind((self.ip, self.port))\n self.socket.listen(self.listenNumber)\n self.printLine()\n print(\"start for 
listening \")", "def _start_server(apps, port, no_browser):\n # necessary for the dashboard to work when called from a notebook\n asyncio.set_event_loop(asyncio.new_event_loop())\n\n # this is adapted from bokeh.subcommands.serve\n with report_server_init_errors(port=port):\n server = Server(apps, port=port)\n\n # On a remote server, we do not want to start the dashboard here.\n if not no_browser:\n\n def show_callback():\n server.show(\"/\")\n\n server.io_loop.add_callback(show_callback)\n\n address_string = server.address if server.address else \"localhost\"\n\n print( # noqa: T201\n \"Bokeh app running at:\",\n f\"http://{address_string}:{server.port}{server.prefix}/\",\n )\n server._loop.start()\n server.start()", "def _start(self, host):\n pass", "def run(self):\n self._connection = self.open_connection()\n self._connection.ioloop.start()", "def _run(self) -> None:\n asyncio.set_event_loop(self._server_loop)\n self._server_loop.run_until_complete(self._runner.setup())\n\n site = web.TCPSite(\n self._runner, self.host, self.port, ssl_context=self.ssl_context\n )\n self._server_loop.run_until_complete(site.start())\n\n # If the Server was initialized with port 0, determine what port the\n # underlying server ended up listening on\n if self.port == 0:\n site_server = cast(AsyncioServer, site._server)\n sockets = cast(List[Socket], site_server.sockets)\n socket = sockets[0]\n self.port = socket.getsockname()[1]\n\n self._startup_event.set()\n self._server_loop.run_forever()", "def run(self):\n # Needed in order to start the file server in detached mode.\n self._execute_command('yum install -y screen', sudo=True, retries=5)\n\n self.logger.info('Starting up SimpleHTTPServer on {0}'\n .format(self.port))\n self._execute_command('cd {0} && screen -dm {1}'\n .format(self.fileserver_path, self.server_cmd),\n pty=False)", "def start(self):\n threading.Thread(target=self.serve_forever).start()", "def start(self) -> None:\n if not self.STARTED:\n sysinfo_dump: dict = tools.sysinfo()\n self.LOG.debug(f\"SYSTEM INFO: {tools.json_dump(sysinfo_dump)}\")\n\n try:\n self.AUTH.login()\n except Exception as exc: # noqa: BLE001\n if not self.WRAPERROR:\n raise\n\n pre = f\"Unable to connect to {self.url!r}\"\n connect_exc = ConnectError(f\"{pre}: {exc}\")\n\n if isinstance(exc, requests.ConnectTimeout):\n timeout = self.HTTP.CONNECT_TIMEOUT\n msg = f\"{pre}: connection timed out after {timeout} seconds\"\n connect_exc = ConnectError(msg)\n elif isinstance(exc, requests.ConnectionError):\n reason = self._get_exc_reason(exc=exc)\n connect_exc = ConnectError(f\"{pre}: {reason}\")\n elif isinstance(exc, InvalidCredentials):\n connect_exc = ConnectError(f\"{pre}: Invalid Credentials supplied\")\n\n connect_exc.exc = exc\n raise connect_exc from exc\n\n self.STARTED = True\n self.LOG.info(str(self))", "def openSocket(self):\n try:\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind((self.host, self.port))\n self.server.listen(self.backlog)\n except socket.error as e:\n raise ErrorSocketOpen(self.feederName, e.strerror)\n if self.verbosity >= 1:\n print('Starting config server for %s at %s, port %s.' 
% (self.feederName, self.host, self.port))", "def start(self):\n\n address = (socket.gethostbyname(self.hostname), self.port)\n logger.info(\"Connecting to %r\" % (address,))\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(address)\n self._start_processors()\n return self", "def server_init(self):\n if not self._web_interface_thread.isAlive():\n # spawn the web interface.\n self._web_interface_thread.start()", "def run(self):\n try:\n self.server = socket(AF_INET, SOCK_STREAM)\n self.server.bind(self.address)\n self.server.listen(5) # Allows up to 5 waiting clients\n\n while True:\n self.myView.updateStatus('Waiting for connection ...')\n client, address = self.server.accept()\n self.myView.updateStatus('... connected from ' + str(address))\n handler = ClientHandler(client, self.bank, self.myView)\n handler.start()\n\n except Exception as message:\n self.myView.updateStatus(message)\n self.server.close()\n self.myView.updateStatus(\"Server shutting down.\")", "def main():\n try:\n port = 8080\n ip = '0.0.0.0'\n http_server = WSGIServer((ip, port),\n app,\n log=logging,\n error_log=logging,\n )\n print(\"Server started at: {0}:{1}\".format(ip, port))\n http_server.serve_forever()\n except Exception as exc:\n logger.error(exc.message)\n logger.exception(traceback.format_exc())\n finally:\n # Do something here\n pass", "def start_server(self):\n if not self.config_store:\n raise Exception(\"Error owamp: the configuration of the server has not been done\\n\")\n\n self.server = OwampServer(self.config_store)\n try:\n self.server.launch_owampd()\n except Exception as err:\n template = \"Error owamp: Exception of type {0} occurred:\\n{1!r}\"\n message = template.format(type(err).__name__, err)\n raise Exception(message)", "def Run(self):\n self.BuildWebAppSite()\n\n self.BuildRPCSite(self.env.umpire_cli_port, self.methods_for_cli, '0.0.0.0')\n self.BuildRPCSite(self.env.umpire_rpc_port, self.methods_for_dut)\n\n # Start services.\n reactor.callWhenRunning(self.OnStart)\n # And start reactor loop.\n reactor.run()", "def run(self):\n server = TCPServer((self.host, self.port), TCPHandler)\n server.lymphocytes_getter = self.lymphocytes_getter\n\n #runs forever - so make this thread daemon\n server.serve_forever()", "def setup_for_run(self):\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind((self.ip_address, self.port))\n self.server.listen(100)", "def main():\n tornado.options.parse_command_line()\n ioloop = tornado.ioloop.IOLoop.instance()\n http_server = tornado.httpserver.HTTPServer(App())\n http_server.listen(options.port)\n tornado.autoreload.start()\n ioloop.start()", "def __start_server(self):\n self.server = Server(self.browser_mob)\n self.server.start()\n self.proxy = self.server.create_proxy()", "def run(self):\n self.node_id = CONFIG.node_id\n self.running = Event()\n if not CONFIG.master_pub or not CONFIG.master_repl:\n print colors.yellow(\"Master IP:port is not set in config file (%s)\"\n % CONFIG._fn)\n master_pub = raw_input(\"Enter Master PUB uri (IP or IP:port):\")\n if \":\" in master_pub:\n ip, _, port = master_pub.rpartition(\":\")\n else:\n ip = master_pub\n port = 5551\n CONFIG.update(\"General\", \"master_pub\", \"%s:%s\" % (ip,\n port))\n master_repl = raw_input(\"Enter Master REPLY uri (IP or IP:port), \"\n \"hit ENTER for default(%s:5552):\" % ip)\n if not master_repl:\n port = 5552\n elif \":\" in master_repl:\n ip, _, port = 
master_repl.rpartition(\":\")\n else:\n ip = master_repl\n port = 5552\n CONFIG.update(\"General\", \"master_repl\", \"%s:%s\" % (ip,\n port))\n CONFIG.reload()\n\n if not validate_address(CONFIG.master_pub) or \\\n not validate_address(CONFIG.master_repl):\n LOG.error('Server IP not present in config or is not valid.\\n'\n 'Check your config')\n exit(1)\n\n if not self.node_id:\n LOG.error(\"The node id not set in config. \"\n \"Run program with config option first\")\n exit(1)\n\n self.backend = self.transport_class.from_config(\n CONFIG, **vars(self.args))\n load_plugins(CONFIG)\n self.sessions = {}\n self.matcher = Matcher(self.node_id, self.backend.meta())\n\n LOG.info(\"Starting node\")\n self.details()\n self._sig_int = signal.getsignal(signal.SIGINT)\n self._sig_term = signal.getsignal(signal.SIGTERM)\n\n if os.name == 'nt':\n # Use Ctrl+C to invoke clean on Windows\n import win32api\n win32api.SetConsoleCtrlHandler(self.clean, True)\n else:\n signal.signal(signal.SIGINT, self._handle_terminate)\n signal.signal(signal.SIGTERM, self._handle_terminate)\n\n # Invoke clean for sessions\n signal.signal(signal.SIGHUP, self.clean)\n\n if not self.backend.prepare():\n LOG.info(\"Cannot start transport backend\")\n self._handle_terminate()\n exit(1)\n\n def request_processor():\n req_queue = self.backend.consume_queue('requests',\n ident=\"DISPATCHER\")\n poller = self.backend.create_poller(req_queue)\n while not self.running.is_set():\n try:\n ready = poller.poll(200)\n if not ready:\n continue\n if req_queue in ready:\n message = req_queue.recv()[0]\n if not message:\n continue\n job = JobTarget.build(message)\n if job:\n self.target_match(job)\n except ConnectionError:\n break\n except Exception:\n continue\n req_queue.close()\n\n Thread(target=request_processor).start()\n\n self.backend.loop()\n\n LOG.info(\"Node exited\")", "def server_init(self):\n if not self.web_interface_thread.isAlive():\n # spawn the web interface.\n self.web_interface_thread.start()", "def main(self):\n addr = (self.uri, self.port)\n try:\n self.client.connect(addr)\n except socket.gaierror:\n print(\"[ERROR] not a valid URI. Try again please...\")\n else:\n print(\"[SETUP] client connected to IPv4 address\", self.uri, \"on port\", self.port)\n self.handler()", "def server(host, port, debug):\n run_server(host, port, debug)", "def start(self):\n\n #print 'start tcp port 8080 forwarding'\n subprocess.call(r'%s forward tcp:%d tcp:8080'%(self.adbCmd,self.port),shell=True)\n\n\n # this is not mandatory as we already killed adb server, but this could \n # decrease the webview created in andriod server application. 
maybe\n # it's a bug to create one webview per launch of app?\n #print 'stop existing android server by sending back key'\n for i in xrange(4):\n subprocess.call(r'%s shell input keyevent 4'%self.adbCmd,shell=True)\n\n #print 'start android server activity'\n output=subprocess.check_output(r'%s shell am start -n %s'%(self.adbCmd,\n Service.ANDROID_DRIVER_CLIENT_APP_CMP),\n stderr=subprocess.STDOUT,shell=True).split()\n if len(output)> 5: #if app not installed, there would be error messages\n raise WebDriverException(\"\"\"AndroidDriver needs to be installed on device.\n Download android-server-2.x.apk from\n http://code.google.com/p/selenium/downloads/list\"\"\")\n # wait for WebDriver Client to be launched completely\n time.sleep(2)\n print \"AndroidDriver started on device %s\" % repr(self.device)", "def main():\n global APP\n APP = make_app()\n APP.clients = [] # global list of all connected websocket clients\n APP.printer = Serial('/dev/ttyUSB0', baudrate=19200)\n APP.listen('1337', '0.0.0.0')\n log('Listening on http://0.0.0.0:1337')\n tornado.ioloop.IOLoop.current().start()", "def run_simple_server(tb_app):\n # Mute the werkzeug logging.\n base_logging.getLogger('werkzeug').setLevel(base_logging.WARNING)\n\n try:\n server = serving.make_server(FLAGS.host, FLAGS.port, tb_app, threaded=True)\n server.daemon_threads = True\n except socket.error:\n if FLAGS.port == 0:\n msg = 'TensorBoard unable to find any open port'\n else:\n msg = (\n 'TensorBoard attempted to bind to port %d, but it was already in use'\n % FLAGS.port)\n logging.error(msg)\n print(msg)\n exit(-1)\n\n port = server.socket.getsockname()[1]\n msg = 'Starting TensorBoard %s at http://%s:%d' % (tb_app.tag, FLAGS.host,\n port)\n print(msg)\n logging.info(msg)\n print('(Press CTRL+C to quit)')\n sys.stdout.flush()\n\n server.serve_forever()", "def run(self):\n HOST = 'localhost' # Symbolic name meaning all available interfaces\n PORT = 54123 # Arbitrary non-privileged port\n \n \n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n while(self.running):\n s.listen(1)\n conn, addr = s.accept()\n self.listen_to_connection(conn)\n conn.close()\n s.close()" ]
[ "0.83166367", "0.80134946", "0.79343057", "0.7532442", "0.74999815", "0.74902725", "0.7488411", "0.7407796", "0.7407796", "0.7342273", "0.73058623", "0.72899777", "0.7254523", "0.72319925", "0.72092813", "0.71867055", "0.7134933", "0.71249473", "0.7120971", "0.7115196", "0.7109324", "0.7103823", "0.7099091", "0.70686126", "0.70343643", "0.70294255", "0.7023837", "0.7016961", "0.7016732", "0.7010056", "0.6979397", "0.69769233", "0.69648296", "0.69641423", "0.6943546", "0.69241023", "0.6920063", "0.69080585", "0.69045955", "0.6878974", "0.68749547", "0.68651575", "0.68577564", "0.6840986", "0.68370533", "0.68369037", "0.6832504", "0.6805127", "0.68026483", "0.68026483", "0.6796355", "0.67959756", "0.67881143", "0.6779061", "0.67707086", "0.6770127", "0.67575", "0.6737238", "0.6713967", "0.67127943", "0.66906506", "0.6686411", "0.6684149", "0.66726935", "0.6664954", "0.6664954", "0.66547394", "0.66467154", "0.66415423", "0.6640934", "0.66336346", "0.6630409", "0.6629162", "0.66164243", "0.66120607", "0.66085416", "0.6607738", "0.66076183", "0.65984404", "0.6591545", "0.6587286", "0.65825605", "0.65610456", "0.65517825", "0.6539172", "0.65390915", "0.65388316", "0.6537784", "0.65350413", "0.65338093", "0.65299934", "0.65147233", "0.65093523", "0.65060914", "0.6503713", "0.6502543", "0.6495832", "0.64905405", "0.6490225", "0.64895844" ]
0.75330657
3
Configure the Player object by attaching some event handlers, adding a default route, and loading quests.
def config(self):
    # Set up on_startup listener for connecting to the server
    self.aiohttp.on_startup.append(self.ws.connect)

    # Await websocket and client session termination
    async def shutdown(app):
        await self.ws.close()
        await self.client.close()

    # Set up on_shutdown listeners for graceful shutdown
    self.aiohttp.on_shutdown.append(shutdown)

    # Add a default route
    self.aiohttp.router.add_route('*', '/', lambda request: web.json_response({
        "msg": "I'm alive"
    }))

    # Load user defined quests
    self.load_quests()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handler_init(self):\r\n\t\tself._handlers[\"player-join\"] = FunctionDelegate()\r\n\t\tself._handlers[\"player-quit\"] = FunctionDelegate()\r\n\t\tself._handlers[\"game-start\"] = FunctionDelegate()\r\n\t\tself._handlers[\"game-stop\"] = FunctionDelegate()", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def setup(self):\n # Set up the player\n self.player_sprite = arcade.Sprite(\"Sprites/Jugador/Jugador.jpg\", SPRITE_SCALING)\n self.player_sprite.center_x = 100\n self.player_sprite.center_y = 100\n self.player_list = arcade.SpriteList()\n self.player_list.append(self.player_sprite)\n\n # Listado de habitaciones\n self.rooms = []\n self.rooms.append(setup_pueblo())\n\n #Contador de habitación\n self.current_room = 0\n\n #Fisicas\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list)", "def setUp(self):\n self.player = Player()", "def setup(self):\n # Create your sprites and sprite lists here\n self.game: Game = Game(SCREEN_WIDTH, SCREEN_HEIGHT, TILE_SIZE, 1, grid_layers = 4)\n self.game.game_message = \"Lead the Rabbit home\"\n\n # show the menu so that we see the instructions\n self.game.menu.button_list[0].text = \"Start\"\n self.game.menu.is_visible = True", "def setup_game(self):", "def __init__(self, *args, **kwargs):\n super(Player, self).__init__(*args, **kwargs)", "def init_player():\n global active_track_idx\n global track_last_slided_pos\n global track_last_paused_pos\n global track_total_play_time \n\n # INITIALIZE Player\n active_track_idx = -1\n cancel_update_play_time_loop()\n cancel_track_end_event_loop()\n track_status.set(\"---\")\n track_title.set(\"--- : \")\n play_pause_btn.configure(image=play_img)\n track_last_slided_pos = 0\n track_last_paused_pos = 0\n track_total_play_time = 0\n track_pos_label.configure(text=\"00:00\")\n track_length_label.configure(text=\"00:00\")\n track_pos_slider.configure(state=\"disabled\")\n track_pos.set(0)", "def __init__(self, config):\n super().__init__(config)\n\n # Prepare the timer.\n self.timer = 0\n\n # Set the current player index.\n self.current_player_index = 0\n # If we are the client, the server goes first.\n for i in range(len(sys.argv)):\n if sys.argv[i] == \"--client\":\n self.current_player_index = 1\n\n # Prepare the phase counter.\n self.__current_phase = Game.PHASE_PREPARE\n # Prepare the shot location store.\n self.__current_fire_location = None\n self.__current_fire_effect = None", "def on_init(self):\n self.controller = gameController.Controller()", "def _set_handler_to_server(self):\r\n\t\tself._comm_server.set_disconnection_handler(self.player_quit)\r\n\t\tself._comm_server.add_command_handler(\"join\", self.player_join)\r\n\t\tself._comm_server.add_command_handler(\"position\", self.player_position)\r\n\t\tself._comm_server.add_command_handler(\"send-to\", self.player_send_msg)\r\n\t\tself._comm_server.add_command_handler(\"send-team\", self.player_team_broadcast)", "def setup(self):\n\n self.characters = arcade.SpriteList()\n self.dungeon_sprites = arcade.SpriteList(\n use_spatial_hash=True, spatial_hash_cell_size=16\n )\n\n self.player = Item(ord(\"@\"), arcade.csscolor.WHITE)\n self.player.x = 0\n self.player.y = 0\n self.characters.append(self.player)\n\n # Size of the map\n map_width = MAP_WIDTH\n map_height = MAP_HEIGHT\n\n # Some variables for the rooms in the map\n room_max_size = 10\n room_min_size = 6\n 
max_rooms = 30\n\n self.game_map = GameMap(map_width, map_height)\n self.game_map.make_map(\n max_rooms, room_min_size, room_max_size, map_width, map_height, self.player\n )\n\n # Draw all the tiles in the game map\n for y in range(self.game_map.height):\n for x in range(self.game_map.width):\n wall = self.game_map.tiles[x][y].block_sight\n sprite = Item(WALL_CHAR, arcade.csscolor.BLACK)\n if wall:\n sprite.block_sight = True\n else:\n sprite.block_sight = False\n\n sprite.x = x\n sprite.y = y\n\n self.dungeon_sprites.append(sprite)\n\n recalculate_fov(\n self.player.x, self.player.y, FOV_RADIUS, self.dungeon_sprites\n )", "def __init__(self):\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)", "def bot_init():\n client.Console.Say('Hello World')\n global player\n player = client.GetPlayer()", "def __init__(self):\n\n ShowBase.__init__(self)\n controls.setup_mouse()\n self.tpp_camera = TPPCamera()\n\n try:\n self.world = World()\n except OSError:\n raise\n\n self.physics = Physics(self.world.player)\n base.taskMgr.add(self.__main_loop, \"__main_loop\")", "def init(cls):\n\n cls.configs = yaml.load( file('../local/config.yaml') )\n cls.is_online = False\n cls.state = State.playing\n cls.classes = classes\n cls.guiclasses = guiclasses\n\n # set up pygame and init\n pygame.init()\n\n # Set up the window\n cls.screen = pygame.display.set_mode(\n tuple(cls.configs['options']['resolution']),\n 0,\n 32)\n classes.screen = cls.screen\n guiclasses.screen = cls.screen", "def __init__(self, player):\n self.player = player", "def start(self, myPlayerName, players):\r\n # Load the environment model.\r\n self.levelNode = render.attachNewNode(\"Level node\")\r\n self.level = LevelContainer(self.levelName)\r\n self.level.render(self.levelNode, base.loader)\r\n self.levelNode.setAttrib(ShadeModelAttrib.make(ShadeModelAttrib.MFlat))\r\n\r\n base.win.setClearColor(Vec4(0,0,0,1))\r\n self.title = addTitle(\"PushBack\")\r\n self.players = dict()\r\n \r\n self.playersNode = render.attachNewNode(\"Players Root Node\")\r\n self.healthbar = DirectWaitBar(range=1.0, barColor=(1.0, 0.0, 0.0, 1.0), \r\n value=1.0, frameSize=(-0.45,0.45,1.0,0.98))\r\n base.disableMouse()\r\n base.cam.reparentTo(self.playersNode)\r\n base.cam.setCompass()\r\n base.cam.setH(0)\r\n base.cam.setZ(CAM_HEIGHT)\r\n base.cam.setY(-CAM_HEIGHT)\r\n base.cam.setP(-CAM_ANGLE)\r\n \r\n ambientLight = AmbientLight(\"ambientLight\")\r\n ambientLight.setColor(Vec4(.3, .3, .3, 1))\r\n directionalLight = DirectionalLight(\"directionalLight\")\r\n directionalLight.setDirection(Vec3(-5, -5, -5))\r\n directionalLight.setColor(Vec4(1, 1, 1, 1))\r\n directionalLight.setSpecularColor(Vec4(1, 1, 1, 1))\r\n render.setLight(render.attachNewNode(ambientLight))\r\n render.setLight(render.attachNewNode(directionalLight))\r\n skins = [\"stony_green\", \"stony_red\", \"stony_blue\", \"bonbon_blue\",\"bonbon_green\", \"red\", \"pushette\", \"blue\"]\r\n for player in players:\r\n print \"Init player %s\" % player[0]\r\n p = Player(player[0])\r\n p.reparentTo(self.playersNode)\r\n p.setPos(player[1])\r\n p.setColor(skins.pop())\r\n self.players[player[0]] = p\r\n if myPlayerName == player[0]:\r\n self.myPlayer = p\r\n base.cam.reparentTo(self.myPlayer)", "def init_game(self):\n self.view.carregar_jogadores_possiveis(self._possible_players_list())\n self.view.put_view_in_main_loop()", "def __init__(self, player):\n\t\tself.player = player", "def 
__init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.clock = pg.time.Clock()\n self.fps = 60.0\n self.keys = pg.key.get_pressed()\n self.done = False\n self.player = Player((50,875), 4)\n self.level = pg.Surface((1000,1000)).convert()\n self.level_rect = self.level.get_rect()\n self.viewport = self.screen.get_rect(bottom=self.level_rect.bottom)\n self.win_text,self.win_rect = self.make_text()\n self.obstacles = self.make_obstacles()", "def setup(self):\n\n # Set up the Cameras\n viewport = (0, 0, self.window.width, self.window.height)\n self.camera = arcade.SimpleCamera(viewport=viewport)\n self.gui_camera = arcade.SimpleCamera(viewport=viewport)\n\n # Map name\n map_name = \":resources:tiled_maps/map_with_ladders.json\"\n\n # Layer Specific Options for the Tilemap\n layer_options = {\n LAYER_NAME_PLATFORMS: {\n \"use_spatial_hash\": True,\n },\n LAYER_NAME_MOVING_PLATFORMS: {\n \"use_spatial_hash\": False,\n },\n LAYER_NAME_LADDERS: {\n \"use_spatial_hash\": True,\n },\n LAYER_NAME_COINS: {\n \"use_spatial_hash\": True,\n },\n }\n\n # Load in TileMap\n self.tile_map = arcade.load_tilemap(map_name, TILE_SCALING, layer_options)\n\n # Initiate New Scene with our TileMap, this will automatically add all layers\n # from the map as SpriteLists in the scene in the proper order.\n self.scene = arcade.Scene.from_tilemap(self.tile_map)\n\n # Keep track of the score\n self.score = 0\n\n # Shooting mechanics\n self.can_shoot = True\n self.shoot_timer = 0\n\n # Set up the player, specifically placing it at these coordinates.\n self.player_sprite = PlayerCharacter()\n self.player_sprite.center_x = (\n self.tile_map.tile_width * TILE_SCALING * PLAYER_START_X\n )\n self.player_sprite.center_y = (\n self.tile_map.tile_height * TILE_SCALING * PLAYER_START_Y\n )\n self.scene.add_sprite(LAYER_NAME_PLAYER, self.player_sprite)\n\n # Calculate the right edge of the my_map in pixels\n self.end_of_map = self.tile_map.width * GRID_PIXEL_SIZE\n\n # -- Enemies\n enemies_layer = self.tile_map.object_lists[LAYER_NAME_ENEMIES]\n\n for my_object in enemies_layer:\n cartesian = self.tile_map.get_cartesian(\n my_object.shape[0], my_object.shape[1]\n )\n enemy_type = my_object.properties[\"type\"]\n if enemy_type == \"robot\":\n enemy = RobotEnemy()\n elif enemy_type == \"zombie\":\n enemy = ZombieEnemy()\n enemy.center_x = math.floor(\n cartesian[0] * TILE_SCALING * self.tile_map.tile_width\n )\n enemy.center_y = math.floor(\n (cartesian[1] + 1) * (self.tile_map.tile_height * TILE_SCALING)\n )\n if \"boundary_left\" in my_object.properties:\n enemy.boundary_left = my_object.properties[\"boundary_left\"]\n if \"boundary_right\" in my_object.properties:\n enemy.boundary_right = my_object.properties[\"boundary_right\"]\n if \"change_x\" in my_object.properties:\n enemy.change_x = my_object.properties[\"change_x\"]\n self.scene.add_sprite(LAYER_NAME_ENEMIES, enemy)\n\n # Add bullet spritelist to Scene\n self.scene.add_sprite_list(LAYER_NAME_BULLETS)\n\n # --- Other stuff\n # Set the background color\n if self.tile_map.background_color:\n self.window.background_color = self.tile_map.background_color\n\n # Create the 'physics engine'\n self.physics_engine = arcade.PhysicsEnginePlatformer(\n self.player_sprite,\n platforms=self.scene[LAYER_NAME_MOVING_PLATFORMS],\n gravity_constant=GRAVITY,\n ladders=self.scene[LAYER_NAME_LADDERS],\n walls=self.scene[LAYER_NAME_PLATFORMS],\n )", "def setup_new_game(self):\r\n self._player = Player()\r\n self._stats = 
GameStats(self._bb_settings)\r\n self._scoreboard = Scoreboard(self._bb_settings, self._screen)", "def __init__(self, master: tk.Tk):\n self._master = master\n master.title(\"Mario\") # set title\n self._config = {} # set config nest dictionary\n self._level_dic = {} # set level nest dictionary\n self._config_status = True # status for successfully read config or not\n self._pause = False # if True, game will pause, if False, game continue\n\n if self.config_input(): # read config info from file. If can't read, get False\n for key in self._config.keys(): # seeking for some basic info\n if key == 'World': # from heading: 'World'\n if all(k in self._config[key] for k in ('gravity', 'start')):\n try: # get gravity from 'World', then turns it into 'int'\n self._gravity = int(self.get_config(key, 'gravity'))\n except ValueError: # if failed\n messagebox.showerror(\"Invalid value in World\", \"Invalid value in gravity!\")\n self.config_exit()\n try: # get start level. Try to open it\n self._start_level = self.get_config(key, 'start')\n open(self._start_level)\n except IOError: # if failed\n messagebox.showerror(\"Invalid value in World\",\n \"Don't have this \" + self._start_level + \" file!\")\n self.config_exit()\n else: # if 'World' don't have gravity and start_level\n messagebox.showerror(\"Missing attribute\", \"Missing attributes in World!\")\n self.config_exit()\n elif key == 'Player': # from heading: 'Player'\n if all(k in self._config[key] for k in\n ('character', 'x', 'y', 'mass', 'health', 'max_velocity')):\n try: # try get all those stuff below, and change their type\n self._x = float(self.get_config(key, 'x')) # get x co-ordinate\n self._y = float(self.get_config(key, 'y')) # get y co-ordinate\n self._mass = int(self.get_config(key, 'mass')) # get mass\n self._max_health = int(self.get_config(key, 'health')) # get max_health\n self._max_velocity = int(self.get_config(key, 'max_velocity')) # get max_velocity\n except ValueError: # if failed => invalid value\n messagebox.showerror(\"Invalid value in Player\", \"Invalid value in Player attributes!\")\n self.config_exit()\n self._character = self.get_config(key, 'character') # get character\n if self._character not in PLAYERS: # check character\n messagebox.showerror(\"Invalid value in Player\",\n \"Don't have this '\" + self._character + \"' character!\")\n self.config_exit()\n else: # must missing some of the attribute\n messagebox.showerror(\"Missing attribute\", \"Missing attributes in Player!\")\n self.config_exit()\n else: # from heading which is not 'World' and 'Player' => 'Level'\n try: # check the level existence\n open(key)\n if self.get_config(key, 'goal') is not None: # level must have a goal\n self._this_level = {} # create a new dic for this level\n self._this_level.update(goal=self.get_config(key, 'goal')) # store the goal\n else: # warn that must have a goal\n messagebox.showerror(\"Missing attribute\", \"'\" + key +\n \"' level must have a goal!\")\n self.config_exit()\n # if has tunnel, update; if don't, update with None\n self._this_level.update(tunnel=self.get_config(key, 'tunnel'))\n self._this_level.update(record=(self._max_health, 0)) # set record(health, score)\n # update this level to the general level dic\n self._level_dic.update(dict([(key, self._this_level)]))\n except IOError: # if this level don't exist\n messagebox.showerror(\"Invalid heading\", \"Don't have this '\" + key + \"' level\")\n self.config_exit()\n else: # if fail in read progress\n self.config_exit()\n\n if self._config_status: # only build 
the world with success config settings\n # build the world with config settings\n world_builder = WorldBuilder(BLOCK_SIZE, gravity=(0, self._gravity), fallback=create_unknown)\n world_builder.register_builders(BLOCKS.keys(), create_block)\n world_builder.register_builders(ITEMS.keys(), create_item)\n world_builder.register_builders(MOBS.keys(), create_mob)\n self._builder = world_builder\n\n self._player = Player(self._character, max_health=self._max_health)\n # set max_velocity to player to avoid hard-coding\n self._player.set_max_velocity(self._max_velocity)\n self._filename = self._start_level # set current level\n self._goal = self._level_dic[self._filename]['goal'] # get current level's goal\n self._tunnel = self._level_dic[self._filename]['tunnel'] # get current level's tunnel\n self.reset_world(self._filename) # load the start level\n # View entities on canvas\n self._renderer = MarioViewRenderer(BLOCK_IMAGES, ITEM_IMAGES, MOB_IMAGES)\n size = tuple(map(min, zip(MAX_WINDOW_SIZE, self._world.get_pixel_size())))\n self._view = GameView(master, size, self._renderer)\n self._view.pack()\n\n self._press = False # status for whether player press the switch\n self.bind() # bind the keyboard\n\n # Status Display\n self._percentage = 1 # player health percentage\n self._score = self._player.get_score() # player's score\n self._statue = StatueDisplay(master, size[0], size[1]) # build statue display\n self._statue.pack(side=tk.BOTTOM, fill=tk.X) # pack it in the bottom\n\n # Wait for window to update before continuing\n master.update_idletasks()\n self.step()\n\n # File menu\n menubar = tk.Menu(self._master)\n # Tell master what is this menu\n self._master.config(menu=menubar)\n file_menu = tk.Menu(menubar) # build a menu\n menubar.add_cascade(label=\"File\", menu=file_menu) # File\n file_menu.add_command(label=\"Load Level\", command=self.load_map) # Load Level\n file_menu.add_command(label=\"Reset Level\", command=self.reset_map) # Reset Level\n file_menu.add_command(label=\"High Score\", command=self.show_score) # show High Score\n file_menu.add_command(label=\"Exit\", command=self.exit) # Exit the game\n menubar.add_cascade(label=\"Pause/Begin\", command=self.pause) # pause switch", "def on_init(self):\n pygame.init()\n self.background.load_from_file()\n self.hero.load_from_file()\n self.enemy.load_from_file()\n\n # Some music and sound fx\n # frequency, size, channels, buffersize\n # pygame.mixer.pre_init(44100, 16, 2, 4096)\n self.effect = pygame.mixer.Sound('sounds/bounce.wav')\n pygame.mixer.music.load('sounds/music.wav')\n pygame.mixer.music.play(-1)\n\n self.hero.screen = self.background.screen\n self.enemy.screen = self.background.screen\n self.clock = pygame.time.Clock()\n pygame.display.set_caption(\n 'Angry Floating Guy! 
World: {} (w to change world, arrows to move, Esc to quit).'.format(self.current_world.name))\n\n self._running = True", "def setup(self, level):\r\n\r\n # Used to keep track of our scrolling\r\n self.view_bottom = 0\r\n self.view_left = 0\r\n\r\n # Keep track of the score\r\n self.score = 0\r\n\r\n # Keep track of lives\r\n # self.lives = 5\r\n\r\n # Create the Sprite lists\r\n self.player_list = arcade.SpriteList()\r\n self.foreground_list = arcade.SpriteList()\r\n self.background_list = arcade.SpriteList()\r\n self.wall_list = arcade.SpriteList()\r\n self.coin_list = arcade.SpriteList()\r\n\r\n # Set up the player, specifically placing it at these coordinates.\r\n image_source = \"images/Alice/Alice7_front.png\"\r\n self.player_sprite = arcade.Sprite(image_source, CHARACTER_SCALING)\r\n self.player_sprite.center_x = PLAYER_START_X\r\n self.player_sprite.center_y = PLAYER_START_Y\r\n self.player_list.append(self.player_sprite)\r\n\r\n # --- Load in a map from the tiled editor ---\r\n\r\n # Name of the layer in the file that has our platforms/walls\r\n platforms_layer_name = 'Platforms'\r\n moving_platforms_layer_name = 'Moving Platforms'\r\n # Name of the layer that has items for pick-up\r\n coins_layer_name = 'Coins'\r\n # Name of the layer that has items for foreground\r\n foreground_layer_name = 'Foreground'\r\n # Name of the layer that has items for background\r\n background_layer_name = 'Background'\r\n # Name of the layer that has items we shouldn't touch\r\n dont_touch_layer_name = \"Don't Touch\"\r\n\r\n # Map name\r\n map_name = f\"map4_level_{level}.tmx\"\r\n\r\n # Read in the tiled map\r\n my_map = arcade.tilemap.read_tmx(map_name)\r\n\r\n # Calculate the right edge of the my_map in pixels\r\n self.end_of_map = my_map.map_size.width * GRID_PIXEL_SIZE\r\n\r\n # -- Background\r\n self.background_list = arcade.tilemap.process_layer(my_map,\r\n background_layer_name,\r\n TILE_SCALING)\r\n\r\n # -- Foreground\r\n self.foreground_list = arcade.tilemap.process_layer(my_map,\r\n foreground_layer_name,\r\n TILE_SCALING)\r\n\r\n # -- Platforms\r\n self.wall_list = arcade.tilemap.process_layer(map_object=my_map,\r\n layer_name=platforms_layer_name,\r\n scaling=TILE_SCALING,\r\n use_spatial_hash=True)\r\n # -- Moving Platforms\r\n moving_platforms_list = arcade.tilemap.process_layer(my_map, moving_platforms_layer_name, TILE_SCALING)\r\n for sprite in moving_platforms_list:\r\n self.wall_list.append(sprite)\r\n\r\n # -- Coins\r\n self.coin_list = arcade.tilemap.process_layer(my_map,\r\n coins_layer_name,\r\n TILE_SCALING,\r\n use_spatial_hash=True)\r\n\r\n # -- Don't Touch Layer\r\n self.dont_touch_list = arcade.tilemap.process_layer(my_map,\r\n dont_touch_layer_name,\r\n TILE_SCALING,\r\n use_spatial_hash=True)\r\n\r\n # --- Other stuff\r\n # Set the background color\r\n if my_map.background_color:\r\n arcade.set_background_color(my_map.background_color)\r\n\r\n # Create the 'physics engine'\r\n self.physics_engine = arcade.PhysicsEnginePlatformer(self.player_sprite,\r\n self.wall_list,\r\n GRAVITY)", "def load_players(self):\n\n self.player_layer = MousePlayerController()\n self.player_layer.player.push_handlers(self)\n self.add(self.player_layer)\n\n if self.saved_player is not None:\n self.player_layer.player.set_params(self.saved_player)", "def __init__(self):\n\n super().__init__()\n self.setup_janggi_game()\n self._game_state = 'UNFINISHED'\n self._player_turn = 'BLUE'", "def setup(self):\n\n # Used to keep track of our scrolling\n self.view_bottom = 0\n self.view_left = 0\n\n # 
Keep track of the score\n self.score = 0\n\n # Create the Sprite lists\n self.player_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n\n # Set up the player, specifically placing it at these coordinates.\n # image_source = \":resources:images/animated_characters/female_adventurer/femaleAdventurer_idle.png\"\n self.player_list = arcade.SpriteList()\n self.player_sprite = Player()\n self.player_sprite.center_x = 256\n self.player_sprite.center_y = 256\n self.player_list.append(self.player_sprite)\n\n # --- Load in a map from the tiled editor ---\n\n # Name of map file to load\n map_name = r\"Math_Game\\floor_is_lava.tmx\"\n # Name of the layer in the file that has our platforms/walls\n platforms_layer_name = 'Platforms'\n\n # Read in the tiled map\n my_map = arcade.tilemap.read_tmx(map_name)\n\n # -- Platforms\n self.wall_list = arcade.tilemap.process_layer(map_object=my_map,\n layer_name='Platforms',\n base_directory=r'C:\\Users\\katel\\Desktop\\CSE310\\group_project\\Math_Game\\platformer-art-complete-pack-0\\Base pack\\Tiles',\n scaling=TILE_SCALING,\n use_spatial_hash=True, hit_box_algorithm=\"Simple\", hit_box_detail=4.5)\n\n # --- Other stuff\n # Set the background color\n if my_map.background_color:\n arcade.set_background_color(my_map.background_color)\n\n # Create the 'physics engine'\n self.physics_engine = arcade.PhysicsEnginePlatformer(self.player_sprite,\n self.wall_list,\n GRAVITY)", "def __init__(self, width, height, title):\r\n super().__init__(width, height, title)\r\n\r\n # door progress list\r\n self.doors_progress_list = None\r\n\r\n # door return list\r\n self.doors_return_list = None\r\n\r\n # wall list\r\n self.wall_list = None\r\n\r\n # npc list\r\n self.npc_list = None\r\n\r\n # boss list\r\n self.boss_list = None\r\n\r\n # EnemiesShoot\r\n self.enemies_shoot_list = None\r\n\r\n # Enemies\r\n self.enemies_list = None\r\n\r\n # locked blocks\r\n self.locked_blocks_list = None\r\n\r\n # breakable blocks\r\n self.breakable_blocks_list = None\r\n\r\n # Movable blocks\r\n self.movable_blocks_list = None\r\n\r\n # switch blocks\r\n self.switch_blocks_list = None\r\n\r\n # keys\r\n self.keys_list = None\r\n\r\n # hearts\r\n self.hearts_list = None\r\n\r\n # switches\r\n self.switches_list = None\r\n\r\n # moving platforms horizontal\r\n self.moving_plat_horizontal_list = None\r\n\r\n # moving platforms vertical\r\n self.moving_plat_vertical_list = None\r\n\r\n # bounce the platforms horizontal\r\n self.bounce_moving_plat_horizontal_list = None\r\n\r\n # bounce the platforms vertical\r\n self.bounce_moving_plat_vertical_list = None\r\n\r\n # platforms\r\n self.platforms_list = None\r\n\r\n # dont touch\r\n self.dont_touch_list = None\r\n\r\n # back ground list\r\n self.background_list = None\r\n\r\n # Sprite lists\r\n self.player_list = None\r\n\r\n # Set up the player\r\n self.player = None\r\n\r\n # physics engine\r\n self.physics_engine = None\r\n\r\n # map change\r\n self.map_change = 1\r\n\r\n # Used to keep track of our scrolling\r\n self.view_bottom = 0\r\n self.view_left = 0", "def setUp(self):\n self.player = ship.Player(\n constants.PLAYER_START_PLACE,\n constants.PLAYER_WIDTH,\n constants.PLAYER_HEIGHT,\n constants.PLAYER_IMG,\n constants.PLAYER_HEALTH\n )\n\n self.alien = ship.Alien(\n [320, 300],\n 30,\n 30,\n constants.GREEN_ALIEN_IMG,\n 1\n )\n\n self.alien.shooting([320, 300], 5, False)\n\n self.player.shooting([self.player.position[0] + 3, self.player.position[1]], 1, True)", "def setup(self):\n 
build_world.start_level(self)", "def setup(self):\n\n # Sprite lists\n self.player_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList(use_spatial_hash=True,\n spatial_hash_cell_size=128)\n self.enemy_list = arcade.SpriteList()\n\n # Set up the player\n resource = \":resources:images/animated_characters/\" \\\n \"female_person/femalePerson_idle.png\"\n self.player = arcade.Sprite(resource, scale=SPRITE_SCALING)\n self.player.center_x = SPRITE_SIZE * 5\n self.player.center_y = SPRITE_SIZE * 1\n self.player_list.append(self.player)\n\n # Set enemies\n resource = \":resources:images/animated_characters/zombie/zombie_idle.png\"\n enemy = arcade.Sprite(resource, scale=SPRITE_SCALING)\n enemy.center_x = SPRITE_SIZE * 4\n enemy.center_y = SPRITE_SIZE * 7\n self.enemy_list.append(enemy)\n\n spacing = SPRITE_SIZE * 3\n for column in range(10):\n for row in range(15):\n sprite = arcade.Sprite(\":resources:images/tiles/grassCenter.png\",\n scale=SPRITE_SCALING)\n\n x = (column + 1) * spacing\n y = (row + 1) * sprite.height\n\n sprite.center_x = x\n sprite.center_y = y\n if random.randrange(100) > 30:\n self.wall_list.append(sprite)\n\n self.physics_engine = arcade.PhysicsEngineSimple(self.player,\n self.wall_list)\n\n # --- Path related\n # This variable holds the travel-path. We keep it as an attribute so\n # we can calculate it in on_update, and draw it in on_draw.\n self.path = None\n # Grid size for calculations. The smaller the grid, the longer the time\n # for calculations. Make sure the grid aligns with the sprite wall grid,\n # or some openings might be missed.\n grid_size = SPRITE_SIZE\n\n # Calculate the playing field size. We can't generate paths outside of\n # this.\n playing_field_left_boundary = -SPRITE_SIZE * 2\n playing_field_right_boundary = SPRITE_SIZE * 35\n playing_field_top_boundary = SPRITE_SIZE * 17\n playing_field_bottom_boundary = -SPRITE_SIZE * 2\n\n # This calculates a list of barriers. By calculating it here in the\n # init, we are assuming this list does not change. In this example,\n # our walls don't move, so that is ok. If we want moving barriers (such as\n # moving platforms or enemies) we need to recalculate. This can be an\n # time-intensive process depending on the playing field size and grid\n # resolution.\n\n # Note: If the enemy sprites are the same size, we only need to calculate\n # one of these. We do NOT need a different one for each enemy. 
The sprite\n # is just used for a size calculation.\n self.barrier_list = arcade.AStarBarrierList(enemy,\n self.wall_list,\n grid_size,\n playing_field_left_boundary,\n playing_field_right_boundary,\n playing_field_bottom_boundary,\n playing_field_top_boundary)", "def setup(self): \n # Navigate to POS screen\n pos.connect()", "def init_script(cls):\n cls.add_scripting_event(\n \"login\"\n ).set_help(\"\"\"\n When a character logs in or is controlled by a player.\n This scripting event is called after a player has connected to this character, either has part of standard login, or after an admin has taken control of this character.\n \"\"\")", "def __init__(self, event):\n self.event = event\n\n self.valid_commands = {\n 'help': help.HelpPlugin,\n 'karma': karma.KarmaPlugin,\n 'karma_newest': karma.KarmaNewestPlugin,\n 'karma_top': karma.KarmaTopPlugin,\n 'karma_bottom': karma.KarmaBottomPlugin,\n 'roll': roll.RollPlugin,\n 'quest': quest.QuestPlugin,\n 'attr': attribute.AttrPlugin,\n }\n\n self.valid_suffixes = {\n '++': karma.KarmaModifyPlugin,\n '--': karma.KarmaModifyPlugin,\n }\n\n self.bot = SlackHandler()", "def on_enter(self):\n\n super(BaseScene, self).on_enter()\n\n self.load_map()\n self.load_players()\n self.load_enemies()\n self.load_status_bar()\n\n self.enemies_layer.next_wave()", "def setup(self):\n self.setup_button_handlers()\n\n # Enable various plugin pollers if enabled in the config.\n # Note: plugins defined as instance variables to prevent\n # their pollers from being garbage collected.\n if self.config[\"plugins\"][\"openweathermap.org\"][\"enabled\"]:\n from src.plugins import weather\n self.weather_plugin = weather.WeatherPlugin(self)\n self.weather_plugin.create_widgets()\n self.weather_plugin.setup_polling()\n\n if self.config[\"plugins\"][\"HSL\"][\"enabled\"]:\n from src.plugins import trains\n self.train_plugin = trains.TrainPlugin(self)\n self.train_plugin.create_widgets()\n self.train_plugin.setup_polling()\n\n if self.config[\"plugins\"][\"DHT22\"][\"enabled\"]:\n from src.plugins import dht22\n self.dht22_plugin = dht22.DHT22Plugin(self)\n self.dht22_plugin.create_widgets()\n self.dht22_plugin.setup_polling()\n\n # Set a higher row streches to the last used row to push elements\n # closer together\n nrows = self.main_window.right_plugin_grid.rowCount()\n self.main_window.right_plugin_grid.setRowStretch(nrows-1, 1)\n\n # Setup settings window's checkbox initial values:\n tts_enabled = self.config[\"main\"][\"TTS\"]\n self.settings_window.readaloud_checkbox.setChecked(tts_enabled)\n\n nightmode = self.config[\"main\"][\"nighttime\"].get(\"enabled\", False)\n self.settings_window.nightmode_checkbox.setChecked(nightmode)\n\n # Store nighttime range as datetimes to config.\n start_dt = utils.time_str_to_dt(self.config[\"main\"][\"nighttime\"][\"start\"])\n end_dt = utils.time_str_to_dt(self.config[\"main\"][\"nighttime\"][\"end\"])\n\n # Ensure start is before end \n if end_dt <= start_dt:\n end_dt = end_dt + timedelta(1)\n\n self.config[\"main\"][\"nighttime\"].update({\n \"start_dt\": start_dt,\n \"end_dt\": end_dt\n })\n\n # Set a timer to update the range on next nighttime end\n self.nighttime_update_timer = QTimer(self.main_window)\n self.nighttime_update_timer.setSingleShot(True)\n self.nighttime_update_timer.timeout.connect(self._update_nighttime_range)\n\n DELAY_UNTIL_DAYTIME = int((self.config[\"main\"][\"nighttime\"][\"end_dt\"] - datetime.now()).total_seconds())\n self.nighttime_update_timer.start(DELAY_UNTIL_DAYTIME*1000)\n\n alarm_brightness_enabled 
= self.config[\"main\"][\"full_brightness_on_alarm\"]\n self.settings_window.alarm_brightness_checkbox.setChecked(alarm_brightness_enabled)\n\n # Set main window's alarm time display to currently active alarm time\n alarm_time = self.get_current_active_alarm()\n if alarm_time:\n self.main_window.alarm_time_lcd.display(alarm_time)\n\n self.screen_blank_timer = QTimer(self.main_window)\n self.screen_blank_timer.setSingleShot(True)\n self.screen_blank_timer.timeout.connect(self.blank_screen_and_hide_control_buttons)\n\n self.main_window.mouseReleaseEvent = self.on_release_event_handler\n\n # Set radio stations from config to the settings window options\n self.radio_streams = self.config[\"radio\"][\"urls\"]\n self.settings_window.radio_station_combo_box.addItems(self.radio_streams.keys())\n\n # Ensure station set as default is set as current item\n default_station = self.config[\"radio\"][\"default\"]\n self.settings_window.radio_station_combo_box.setCurrentText(default_station)", "def setup(self, forward, feedback, character_id):\n self.background = arcade.load_texture(f\"{DATA_DIR}/14.png\")\n\n self.assigned_player = int(character_id) + 1\n\n # Sprite lists\n self.player_list = arcade.SpriteList()\n\n # jet sprites\n self.player1 = arcade.Sprite(f\"{DATA_DIR}/player1.png\", SPRITE_SCALING_PLAYER)\n self.player2 = arcade.Sprite(f\"{DATA_DIR}/player2.png\", SPRITE_SCALING_PLAYER)\n self.player3 = arcade.Sprite(f\"{DATA_DIR}/player3.png\", SPRITE_SCALING_PLAYER)\n\n self.player1.center_x = 100\n self.player1.center_y = 100\n\n self.player2.center_x = 500\n self.player2.center_y = 100\n\n self.player3.center_x = 1000\n self.player3.center_y = 100\n\n self.player_list.append(self.player1)\n self.player_list.append(self.player2)\n self.player_list.append(self.player3)\n\n build = Build(scale=0.1, image=f\"{DATA_DIR}/11.png\")\n build.lay((0, 1000, 15), \"x\", 10)\n self.wall_list = build.blocks\n\n self.physics_engine = arcade.PhysicsEnginePlatformer(\n getattr(self, f\"player{self.assigned_player}\"), self.wall_list, GRAVITY\n )\n\n self.forward = forward\n self.feedback = feedback\n if (\n os.getenv(\"SERVER\") == \"127.0.0.1\"\n or os.getenv(\"SERVER\") == socket.gethostname()\n ):\n arcade.schedule(self.stream, 0.2)", "def _initialize(self):\n self.send_init_command()", "def setupWidget(self):\r\n self.generateCoordinates()\r\n self.modifyCoordinateLists()\r\n self.settings.movementMatrix = self.movementMatrix\r\n self.settings.ghostIntersectionList = self.ghostIntersectionList\r\n self.createBodies()\r\n print(\"GameW set\")", "def game_setup(self):\n self.deck = Shoe(6)\n self.initial_draw()\n self.pot = ask_for_bet(self.player.money)\n show_table(self.player, self.dealer, self.pot)\n self.surrender_and_insurance()", "def setup(bot):\n @bot.event\n async def on_command_error(ctx, error):\n if isinstance(error, TimeoutError):\n return\n print(error)\n\n @bot.event\n async def on_ready():\n print('Logged in as:\\n{0} (ID: {0.id})'.format(bot.user))\n bot.load_extension('cogs.music')\n bot.load_extension('cogs.utility')\n bot.load_extension('cogs.general')\n bot.load_extension('cogs.fun')\n await bot.change_presence(activity=discord.Game(Bot.PREFIX+\"help\"))", "def loadPlayerCommands(self, player):\n player.addCommand('get', self.commands['get']())\n player.addCommand('drop', self.commands['drop']())\n player.addCommand('go', self.commands['go']())\n player.addCommand('say', self.commands['say']())\n player.addCommand('look', self.commands['look']())\n player.addCommand('quit', 
self.commands['quit']())\n player.addCommand('commands', self.commands['commands']())\n player.addCommand('color', self.commands['color']())", "def on_start(self):\n self.init()", "def __init__(self):\n # clears the console window\n if sys.platform in ('linux-i386','linux2'):\n os.system(\"clear\")\n elif sys.platform in ('win32','dos','ms-dos'):\n os.system(\"cls\")\n\n # print scripts info\n print self.WELCOME_MESSAGE\n\n # initialize all instance variables\n self.guiElements = {} # dictionary of gui elements (buttons, strings, sliders, ...)\n self.gui_events = [] # list of events\n self.gui_event_ids = {} # dictionary of event ids\n self.config = {} # configuration dictionary\n self.target = None # import or export\n self.callback = None # function to call when config gui is done\n self.texpathIndex = 0\n self.texpathCurrent = ''\n\n # reset GUI coordinates\n self.xPos = self.XORIGIN\n self.yPos = self.YORIGIN + Blender.Window.GetAreaSize()[1]\n\n # load configuration\n self.load()", "def __init__(self, settings):\n super().__init__(settings, self.player_info_url, Player)", "def start(self):\n self.player = Player()\n self.dealer = Dealer()\n self.pot = 0\n self.side_bet = 0\n start_game()", "def event_handler(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.RENDER_FRAME = False\n\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n # exit game on ESCAPE\n self.RENDER_FRAME = False\n\n elif event.key == pygame.K_a:\n self.player.move_direction[\"left\"] = True\n\n elif event.key == pygame.K_d:\n self.player.move_direction[\"right\"] = True\n\n elif event.key == pygame.K_SPACE:\n self.player.move_direction[\"up\"] = True\n\n elif event.key == pygame.K_h:\n self.player.move_direction[\"attack\"] = True\n\n elif event.key == pygame.K_k:\n # NOTE: testing purpose only\n self.player.health = 0\n\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_a:\n self.player.move_direction[\"left\"] = False\n elif event.key == pygame.K_d:\n self.player.move_direction[\"right\"] = False", "def init_bot(self):\n dispatcher = self.updater.dispatcher\n\n dispatcher.add_handler(CommandHandler(\"start\", self.on_bot_start))\n dispatcher.add_handler(CommandHandler(\"help\", self.on_bot_help))\n dispatcher.add_handler(CommandHandler(\"about\", self.on_bot_about))\n dispatcher.add_handler(CommandHandler(\"vreausaajut\", self.on_bot_offer_to_help))\n dispatcher.add_handler(CommandHandler(\"status\", self.on_status))\n dispatcher.add_handler(CommandHandler(\"Da\", self.on_accept))\n dispatcher.add_handler(CommandHandler(\"Nu\", self.on_reject))\n\n dispatcher.add_handler(CallbackQueryHandler(self.negotiate_time, pattern=\"^eta.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_dispatch, pattern=\"^caution.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_handle, pattern=\"^handle.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_wellbeing, pattern=\"^state.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_symptom, pattern=\"^symptom.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_wouldyou, pattern=\"^wouldyou.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_further, pattern=\"^further.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_activities, pattern=\"^assist.*\"))\n\n dispatcher.add_handler(MessageHandler(Filters.photo, self.on_photo))\n dispatcher.add_handler(MessageHandler(Filters.contact, self.on_contact))\n 
dispatcher.add_handler(MessageHandler(Filters.text, self.on_text_message))\n dispatcher.add_error_handler(self.on_bot_error)", "def __init__(self):\n\n self.controller = None\n\n self.game_running = False\n self.menu_view_running = False\n self.end_game_running = False", "def controls_setup(self):\n pass", "def init(self):\n\n pygame.init()\n pygame.display.set_mode((640, 480))\n pygame.display.set_caption(\"Gears 4 Geeks\")\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.ser = serial.Serial('COM4', 9600)\n\n #ADAFRUIT_IO_KEY = 'd1a1bd3737714fa488e0364c775a4b4d' ##This will only be good until the end of the competition\n #self.aio = Client(ADAFRUIT_IO_KEY)", "def setup(self):\n arcade.set_background_color(BACKGROUND_COLOR)\n\n self.sprite_list.append(self.ada)\n self.sprite_list.append(self.potato)", "def __init__(self, player_control, players=None):\r\n self.player_control = player_control\r\n self.players = {} # copy for restoration\r\n if players is not None:\r\n for player in players.values():\r\n self.add_player(player)", "def newPlayer():\r\n pass", "async def set_player(self, player: Optional[andesite.Player]) -> None:\n ...", "def __init__(self):\n self._eng = pyttsx.init()\n self._eng.connect(\"started-utterance\", self._onStart)\n self._eng.connect(\"started-word\", self._onWord)\n self._eng.connect(\"finished-utterance\", self._onEnd)", "def connect_controls_to_player(self,\n enable_jump: bool = True,\n enable_punch: bool = True,\n enable_pickup: bool = True,\n enable_bomb: bool = True,\n enable_run: bool = True,\n enable_fly: bool = True) -> None:\n player = self.getplayer(ba.Player)\n assert player\n\n # Reset any currently connected player and/or the player we're\n # wiring up.\n if self._connected_to_player:\n if player != self._connected_to_player:\n player.resetinput()\n self.disconnect_controls_from_player()\n else:\n player.resetinput()\n\n player.assigninput(ba.InputType.UP_DOWN, self.on_move_up_down)\n player.assigninput(ba.InputType.LEFT_RIGHT, self.on_move_left_right)\n player.assigninput(ba.InputType.HOLD_POSITION_PRESS,\n self.on_hold_position_press)\n player.assigninput(ba.InputType.HOLD_POSITION_RELEASE,\n self.on_hold_position_release)\n intp = ba.InputType\n if enable_jump:\n player.assigninput(intp.JUMP_PRESS, self.on_jump_press)\n player.assigninput(intp.JUMP_RELEASE, self.on_jump_release)\n if enable_pickup:\n player.assigninput(intp.PICK_UP_PRESS, self.on_pickup_press)\n player.assigninput(intp.PICK_UP_RELEASE, self.on_pickup_release)\n if enable_punch:\n player.assigninput(intp.PUNCH_PRESS, self.on_punch_press)\n player.assigninput(intp.PUNCH_RELEASE, self.on_punch_release)\n if enable_bomb:\n player.assigninput(intp.BOMB_PRESS, self.on_bomb_press)\n player.assigninput(intp.BOMB_RELEASE, self.on_bomb_release)\n if enable_run:\n player.assigninput(intp.RUN, self.on_run)\n if enable_fly:\n player.assigninput(intp.FLY_PRESS, self.on_fly_press)\n player.assigninput(intp.FLY_RELEASE, self.on_fly_release)\n\n self._connected_to_player = player", "def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def __init__(self, pos, inventory=None):\n _GenericBot.__init__(self, pos, inventory)\n self._changes = {} # Changes to the world", "def setup(cls):\n super().setup()\n cls.default_dialogues = cast(\n DefaultDialogues, cls._skill.skill_context.default_dialogues\n )\n 
cls.tac_dialogues = cast(TacDialogues, cls._skill.skill_context.tac_dialogues)\n cls.oef_search_dialogues = cast(\n OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues\n )", "def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue", "def __init__(self, config):\n self.config = config\n # 2D array describing the game arena\n self.arena = [[Tile.EMPTY for y in range(0, config.arena_size[1])] for x in range(0, config.arena_size[0])]\n self.player = Player((config.arena_size[0] // 2, config.arena_size[1] // 2))\n # Queue of tail segments\n self.tails = deque()\n # The current tick number\n self.tick = 0\n # True if the game is over\n self.game_over = False\n # True if the game should exit\n self.exit = False", "def __init__(self,player,name,playerFn):\n\t\tself.player = player\n\t\tself.name = name\n\t\tself.playerFn = playerFn", "def __init__(self, players):\n\n # Instantiate a Players object with the players queue\n self._players = Players(players)\n # Instantiate the Die to be used for the current game\n self._die = Die()\n # Track the game status\n self._active_turn = True\n self._end_game = False", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[8,0],[-8,0],[0,9],[0,-9]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[8,0],[-8,0],[0,9],[0,-9]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]}\\\n })", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[8,0],[-8,0],[0,9],[0,-9]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[8,0],[-8,0],[0,9],[0,-9]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]}\\\n })", "def prepare(self, level):\n self.greeterboard.welcome_player(\n i18n.OUT_MSG_LUCK.format(self.player_name)\n )\n self.scoreboard.set_labels()\n self.scoreboard.set_level(level)\n self.word_view.setText(i18n.OUT_MSG_NEW_GAME)\n self.init_game_metrics()", "def __init__(self, *args, **kwargs):\n env_vars = [\n (\"SDL_FBDEV\", \"/dev/fb1\"),\n (\"SDL_MOUSEDEV\", \"/dev/input/touchscreen\"),\n (\"SDL_MOUSEDRV\", \"TSLIB\"),\n ]\n for var_name, val in env_vars:\n os.environ[var_name] = val\n self.challenge_thread = None\n self.timeout = kwargs.pop('timeout', 120)\n self.markers = aruco.Dictionary_create(6, 3)\n self.running_challenge = None", "def __init__(self, s_width, s_height, setup):\n pygame.init()\n pygame.font.init()\n\n self.arcade = False\n fullscreen = False\n for opt in setup:\n if opt == Setup.Arcade:\n self.arcade = True\n elif opt == Setup.Fullscreen:\n fullscreen = True\n \n self.joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]\n for j in self.joysticks:\n j.init()\n\n self.display = Display((s_width, s_height), fullscreen)\n self.clock = pygame.time.Clock()\n self.FPS = 60\n\n self.ui = UI(self. 
display)\n if self.arcade:\n if len(self.joysticks) == 0: \n print(\"=================== plug in the controller ===================\") \n exit(1)\n self.ui.enable_arcade_mode()\n \n self.selected_speed = \"speed Medium\"\n self.game_manager = GameManager(self.display, self.ui, GameMode.EatToGrow, GameState.Menu)", "def setup(player, level):\n display('f1', 'inventory', player._inventory)\n\n maze(callback=partial(image, 'stone'))\n\n player.keys(right = 'd', left = 'a', up = 'w', down = 's')\n\n # randomly pick a background\n background()\n\n player.take(Punch(call='1'))\n player.take(FlameThrower(call='2'))\n player.take(Grenade(call='3', distance=6, radius=10))\n player.take(MustardGas(call='4', distance=10, radius=20))\n player.take(AirGun(call='space'))\n player.take(MachineGun(call='5', distance=15, repeat=3))\n player.take(Landmine(call='6', delay=1))\n player.take(C4(call='7', detonate='8', distance=8, radius=10))\n player.take(NuclearBomb(call='n'))\n\n player.take(WallBuster())\n #wall = partial(image, 'stone')\n #player.take(WallBuilder(left='left', right='right', front='up', back='down', wall=wall))\n display('f1', 'inventory', player._inventory)\n\n def drink(soda, player):\n soda.destroy()\n player.energy = 10\n fill(partial(image,'sprite', size=1.0), 0.05, player, drink)\n\n def claim(coin, player):\n coin.destroy()\n player.wealth = 5\n fill(partial(image,'coin', size=1.0), 0.25, player, claim)", "def __init__(self):\n super().__init__()\n\n self._registry = {}\n el = gremlin.event_handler.EventListener()\n el.joystick_event.connect(self._joystick_cb)", "def __initialize_grid_dimensions(self) -> None:\n if len(self.players) > 3:\n self.num_columns = len(self.players)\n self.num_rows = self.num_cells // len(self.players)\n else:\n self.num_columns = self.num_cells // len(self.players)\n self.num_rows = len(self.players)\n self.grid_handler = GridHandler(self.num_columns, self.num_rows)", "def setup(cls):\n cls.location = {\"longitude\": 0.1270, \"latitude\": 51.5194}\n cls.search_query = {\n \"search_key\": \"intro_service\",\n \"search_value\": \"intro_alice\",\n \"constraint_type\": \"==\",\n }\n cls.search_radius = 5.0\n cls.admin_host = \"127.0.0.1\"\n cls.admin_port = 8021\n cls.ledger_url = \"http://127.0.0.1:9000\"\n config_overrides = {\n \"models\": {\n \"strategy\": {\n \"args\": {\n \"location\": cls.location,\n \"search_query\": cls.search_query,\n \"search_radius\": cls.search_radius,\n \"admin_host\": cls.admin_host,\n \"admin_port\": cls.admin_port,\n \"ledger_url\": cls.ledger_url,\n }\n }\n },\n }\n\n super().setup(config_overrides=config_overrides)\n\n # behaviours\n cls.faber_behaviour = cast(\n FaberBehaviour,\n cls._skill.skill_context.behaviours.faber,\n )\n\n # dialogues\n cls.default_dialogues = cast(\n DefaultDialogues, cls._skill.skill_context.default_dialogues\n )\n cls.http_dialogues = cast(\n HttpDialogues, cls._skill.skill_context.http_dialogues\n )\n cls.oef_search_dialogues = cast(\n OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues\n )\n\n # handlers\n cls.http_handler = cast(HttpHandler, cls._skill.skill_context.handlers.http)\n cls.oef_search_handler = cast(\n OefSearchHandler, cls._skill.skill_context.handlers.oef_search\n )\n\n # models\n cls.strategy = cast(Strategy, cls._skill.skill_context.strategy)\n\n cls.logger = cls._skill.skill_context.logger\n\n # mocked objects\n cls.mocked_method = \"SOME_METHOD\"\n cls.mocked_url = \"www.some-url.com\"\n cls.mocked_version = \"some_version\"\n cls.mocked_headers = 
\"some_headers\"\n cls.body_dict = {\"some_key\": \"some_value\"}\n cls.body_str = \"some_body\"\n cls.body_bytes = b\"some_body\"\n cls.mocked_body_bytes = json.dumps(cls.body_str).encode(\"utf-8\")\n cls.mocked_query = Query(\n [Constraint(\"some_attribute_name\", ConstraintType(\"==\", \"some_value\"))],\n DataModel(\n \"some_data_model_name\",\n [\n Attribute(\n \"some_attribute_name\",\n str,\n False,\n \"Some attribute descriptions.\",\n )\n ],\n ),\n )\n cls.mocked_proposal = Description(\n {\n \"contract_address\": \"some_contract_address\",\n \"token_id\": \"123456\",\n \"trade_nonce\": \"876438756348568\",\n \"from_supply\": \"543\",\n \"to_supply\": \"432\",\n \"value\": \"67\",\n }\n )\n\n # list of messages\n cls.list_of_http_messages = (\n DialogueMessage(\n HttpMessage.Performative.REQUEST,\n {\n \"method\": cls.mocked_method,\n \"url\": cls.mocked_url,\n \"headers\": cls.mocked_headers,\n \"version\": cls.mocked_version,\n \"body\": cls.mocked_body_bytes,\n },\n is_incoming=False,\n ),\n )\n\n cls.list_of_oef_search_messages = (\n DialogueMessage(\n OefSearchMessage.Performative.SEARCH_SERVICES,\n {\"query\": cls.mocked_query},\n ),\n )", "def ready(self):\r\n\t\t# Remove attract mode from mode queue - Necessary?\r\n\t\tself.game.modes.remove(self)\r\n\t\t# Initialize game\t\r\n\t\tself.game.start_game()\r\n\t\t# Add the first player\r\n\t\tself.game.add_player()\r\n #self.game.add_player()\r\n\t\t# Start the ball. This includes ejecting a ball from the trough.\r\n\t\tself.game.start_ball()", "def setup(self):\n # TODO : figure out how to make the map interface a singleton class\n\n if not hasattr(self, 'mapInterface'):\n self.mapInterface = MapInterface(settings['FILE_CONFIG']['filename'])", "def on_load(self):\n self.__init__()", "def __init__(self, game):\n super().__init__()\n self.game = game\n self.minsize(700, 600)\n self.wm_title('LOLO')\n\n LoloLogo(self)\n auto = AutoPlayingGame(self)\n\n play_game = tk.Button(self, text=\"Play game\",\n command=self.play_game)\n play_game.place(relx=0.15, rely=0.3, anchor=tk.CENTER)\n\n highscores = tk.Button(self, text=\"Highscores\", command=self.hs)\n highscores.place(relx=0.15, rely=0.4, anchor=tk.CENTER)\n\n exit = tk.Button(self, text=\"Exit\", command=self.exit)\n exit.place(relx=0.15, rely=0.5, anchor=tk.CENTER)\n\n self.name = tk.StringVar()\n self.name.set(\"Anon\")\n name_entry = tk.Entry(self, textvariable=self.name)\n name_entry.pack()\n\n\n self.gamemode = tk.StringVar()\n self.gamemode.set(\"Regular\")\n gamemodeselect = tk.OptionMenu(self, self.gamemode, \"Regular\",\n \"Lucky 7\", \"Make 13\", \"Unlimited\")\n gamemodeselect.place(anchor=tk.NW)\n self.mainloop()", "def __init__(self, client, game):\n super().__init__(client, game)\n self.actions = {} # deprecated\n self.state_handlers = {}\n self.add_handler(\"INIT\", self._initialize)\n self.add_handler(\"STOP\", self._stop)", "def initGameState(self):\n print(\"Setting game state: \")\n self.playGUI = GUI()\n self.playGUI.drawBoard(self.player)", "def __init__(self):\n super().__init__()\n\n # Track the current state of what key is pressed\n self.left_pressed = False\n self.right_pressed = False\n self.up_pressed = False\n self.down_pressed = False\n self.shoot_pressed = False\n self.jump_needs_reset = False\n\n # Our TileMap Object\n self.tile_map = None\n\n # Our Scene Object\n self.scene = None\n\n # Separate variable that holds the player sprite\n self.player_sprite = None\n\n # Our 'physics' engine\n self.physics_engine = None\n\n # A Camera that can be 
used for scrolling the screen\n self.camera = None\n\n # A Camera that can be used to draw GUI elements\n self.gui_camera = None\n\n self.end_of_map = 0\n\n # Keep track of the score\n self.score = 0\n\n # Shooting mechanics\n self.can_shoot = False\n self.shoot_timer = 0\n\n # Load sounds\n self.collect_coin_sound = arcade.load_sound(\":resources:sounds/coin1.wav\")\n self.jump_sound = arcade.load_sound(\":resources:sounds/jump1.wav\")\n self.game_over = arcade.load_sound(\":resources:sounds/gameover1.wav\")\n self.shoot_sound = arcade.load_sound(\":resources:sounds/hurt5.wav\")\n self.hit_sound = arcade.load_sound(\":resources:sounds/hit5.wav\")", "def __init__(self):\n\t\tself.playercolider()", "def __init__(self):\n # zu Beginn ist noch kein Modus gesetzt\n self.mode = None\n # zu Beginn sind noch keine Channels/ Pins konfiguriert\n self.channels = {}\n # es sind zu Beginn auch noch keine callbacks fuer events hinzugefuegt\n self.events = []", "def _initControls(self):\n\n print \"DEBUG: Initializing Controls\"\n Game.Controls[pygame.K_a] = Game.MoveLeft\n Game.Controls[pygame.K_d] = Game.MoveRight\n Game.Controls[pygame.K_w] = Game.Jump\n Game.Controls[pygame.K_s] = Game.Duck\n Game.Controls[pygame.K_SPACE] = Game.Fly\n Game.Controls[pygame.K_j] = Game.Fire\n Game.Controls[pygame.K_ESCAPE] = Game.Quit\n\n Game.BoundControls.append(pygame.K_a)\n Game.BoundControls.append(pygame.K_d)\n Game.BoundControls.append(pygame.K_w)\n Game.BoundControls.append(pygame.K_s)\n Game.BoundControls.append(pygame.K_j)\n Game.BoundControls.append(pygame.K_SPACE)\n Game.BoundControls.append(pygame.K_ESCAPE)", "async def on_ready(self) -> None:\n print(\"Logged on as\", self.user)\n self.chat_ai = ChatAI() # Ready the GPT2 AI generator\n self.chat_ai.load_model() # Load the GPT2 model", "def __init__(self):\r\n self.players = {}", "def configure_widgets(self):\r\n\r\n # 'command' - callback function executed when button is pressed\r\n # since we can't pass it a function with arguments, we use the partial \r\n # function from the functools module\r\n self.btn_tl['command'] = partial(self.play, \"x\", (0,0))\r\n self.btn_tm['command'] = partial(self.play, \"x\", (0,1))\r\n self.btn_tr['command'] = partial(self.play, \"x\", (0,2))\r\n self.btn_ml['command'] = partial(self.play, \"x\", (1,0))\r\n self.btn_mm['command'] = partial(self.play, \"x\", (1,1))\r\n self.btn_mr['command'] = partial(self.play, \"x\", (1,2))\r\n self.btn_bl['command'] = partial(self.play, \"x\", (2,0))\r\n self.btn_bm['command'] = partial(self.play, \"x\", (2,1))\r\n self.btn_br['command'] = partial(self.play, \"x\", (2,2))\r\n\r\n self.btn_reset['text'] = \"Reset\"\r\n self.btn_reset['command'] = self.reset", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[0,-1],[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[0,-1],[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]}\\\n })", "def __init__(self, label, player):\n\n super().__init__(label, player)\n self.set_legal_moves({\\\n 'RED': {'board':[[0,-1],[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]},\\\n 'BLUE':{'board':[[0,-1],[-1,0],[1,0],[0,1]],\\\n 'palace':[[-1,-1],[1,-1],[-1,1],[1,1]]}\\\n })", "def setup(bot):\n bot.add_cog(JokeCommands(bot))", "def setup(bot: KingArthur) -> None:\n bot.add_cog(Ed(bot))", "def __init__(self):\n #Screen configuration\n self.screen_width = 1200\n self.screen_height = 680\n self.bg_color = (0,20,50)\n \n #Hero 
configuration\n #Increase of ship speed to 1.5 pixels instead of 1\n #self.hero_speed_factor = 1.5\n self.hero_limit = 3\n \n #Syringes (bullets) configuration\n #self.bullet_speed_factor = 1\n self.bullets_allowed = 5\n \n #Covids configuration\n self.covid_vertical_speed_factor = 1\n #The value of the movement is negative because it is increasing\n # from the right to the left\n #self.covid_horizontal_speed_factor = -10\n #The pandemy direction equals 1 means to the bottom; -1 means to the top\n # The randint ensures an randomly direction when starting the game\n #if randint(0,1) == 1:\n # self.pandemy_direction = 1\n #else:\n # self.pandemy_direction = -1\n\n #The rate that increases the game speed\n self.speedup_scale = 1.1\n \n self.initialize_dynamic_settings()", "def _setup_kinematics(self):\n self.kin = Kinematics(robot_name=self.robot_name,\n offset=self.offset,\n active_joint_names=self.get_actuated_joint_names(),\n base_name=\"\", \n eef_name=None,\n frames=self.root\n )\n self._init_transform()", "def init_players(self):\n self.spaceships.append(self.player)\n SoundManager.add_sfx(\n self.player.states['exploded']['sfx'],\n self.player\n )", "def set_player(self, char_data):\n self.player = self.server.object_manager.add_player(char_data)", "def setup(self):\n # inicializamos el juego\n\n # Sprite lists\n self.player_list = arcade.SpriteList() # sera lista de personajes\n self.coin_list = arcade.SpriteList() # sera lista de monedas\n self.bullet_list = arcade.SpriteList() # lista de disparos\n\n # Set up the player\n self.score = 0\n\n # Image from kenney.nl\n # cargamos el sprite del jugador\n self.player_sprite = arcade.Sprite(\"character.png\", SPRITE_SCALING_PLAYER)\n # establecemos el inicio de posicion de nuestro jugador\n self.player_sprite.center_x = 50\n self.player_sprite.center_y = 70\n # lo agregamos a la lista de nuestros jugadores\n self.player_list.append(self.player_sprite)\n\n # Create the coins\n for i in range(COIN_COUNT):\n\n # Create the coin instance\n # Coin image from kenney.nl\n # cargamos las monedas\n coin = arcade.Sprite(\"coin_01.png\", SPRITE_SCALING_COIN)\n\n # Position the coin\n coin.center_x = random.randrange(SCREEN_WIDTH)\n coin.center_y = random.randrange(120, SCREEN_HEIGHT)\n\n # Add the coin to the lists\n # lo agregamos a la lista\n self.coin_list.append(coin)\n\n # Set the background color\n # esto aun nose para que sirve\n arcade.set_background_color(arcade.color.AMAZON)", "def appendPlayer(self, player):\n #if (not self.__configuring) and (not (player in self.__players)):\n if (not (player in self.__players)):\n self.__players.append(player)\n player.bind(self)\n if self.__playing:\n player.configure(self)\n self.__notifyPlayer(player, TetrisEvent.TETROMINO_NEXT, \n self.__nextTetroType)\n self.__notifyPlayer(player, TetrisEvent.TETROMINO_START, \n self.__nextTetroType)\n self.__notifyPlayer(player, TetrisEvent.BOARD_CHANGE, [])", "def setup(self):\n\n logger.info('Setting up SimulatedMaps module.')\n\n # Save the cls as a class attribute\n self.cls = self.read_cls()\n\n logger.info('Setup done!')", "def start(self):\n\n p = Parser()\n if self.event_status < 1:\n print(\"\\n\" * 100)\n self.game_intro()\n print(\"\\n\" * 100)\n\n playing = True\n while playing:\n self.check_upgrades()\n self.check_energy()\n self.check_event_status()\n cur_location = self.player.get_location()\n cur_location.print_description(self.event_status)\n cur_location.print_details(self.event_status)\n print_player_info(self.player)\n 
cur_location.set_visited(True)\n\n player_command = get_command()\n cmd_action, cmd_exit, cmd_direction, cmd_item, cmd_character = Parser.action_requested(player_command)\n\n print(\"\\n\" * 100)\n if cmd_action == GO:\n self.player.go_exit(self.event_status, direction=cmd_direction, exit_name=cmd_exit)\n\n elif cmd_action == TAKE:\n if cmd_item is None:\n print(\"You can't take that.\")\n else:\n self.player.take(cmd_item)\n\n elif cmd_action == DROP:\n if cmd_item is None:\n print(\"You can't drop that.\")\n else:\n self.player.drop(cmd_item)\n\n elif cmd_action == TALK:\n if cmd_character is None:\n print(\"You can't do talk to that.\")\n else:\n self.player.talk(cmd_character, self.event_status)\n\n elif cmd_action == LOOK:\n self.player.look(self.event_status)\n\n elif cmd_action == SAVEGAME:\n tmp_save_dir = input(\"Enter the save name\\n> \")\n if tmp_save_dir:\n save_dir = tmp_save_dir\n else:\n save_dir = None\n self.save(save_dir)\n\n elif cmd_action == QUIT:\n print(\"Exiting the game...\")\n return\n\n elif cmd_action == LOOK_AT:\n if cmd_item is None:\n print(\"You can't look at that.\")\n else:\n self.player.look_at(cmd_item)\n\n elif cmd_action == LISTEN:\n self.player.listen()\n\n elif cmd_action == PULL:\n if cmd_item is None:\n print(\"You can't pull that.\")\n else:\n self.pull(cmd_item)\n\n elif cmd_action == PUSH:\n if cmd_item is None:\n print(\"You can't push that.\")\n else:\n self.push(cmd_item)\n\n elif cmd_action == CHARGE:\n self.player.charge()\n\n elif cmd_action == USE:\n if cmd_item is None:\n print(\"You can't use that.\")\n else:\n self.use(cmd_item)\n\n elif cmd_action == WAIT:\n sleep_rate = 0.2\n print(\"You wait for a few moments...\")\n time.sleep(2)\n duration = time.time() + 5\n while time.time() < duration:\n print(\"\\n\" * 100)\n print(\"*\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"**\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"***\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"****\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"*****\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"****\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"***\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"**\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"*\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"Nothing happened...\")\n time.sleep(2)\n print(\"\\n\" * 100)\n\n elif cmd_action == HELP:\n self.help()\n # wait for user to finish reading\n input(\"Press 'enter' to continue.\")\n\n elif cmd_action == INVENTORY:\n self.player.print_inventory()\n\n elif cmd_action == LOADGAME:\n saved_games_dir = os.path.join(os.getcwd(), \"saved_games\")\n\n # Print Available Saved Games\n print(\"Enter the number of the game you want to load.\")\n saved_games = [game for game in os.listdir(saved_games_dir)]\n for index, sg in enumerate(saved_games):\n print(\"{0}. {1}\".format(index + 1, sg))\n\n # TODO error checking on user input\n user_game_selection = input(\">\")\n user_game = saved_games[int(user_game_selection) - 1]\n print(\"Loading game: {0}\".format(user_game))\n print(\"\\n\" * 100)\n self.load_game(os.path.join(saved_games_dir, user_game))\n else:\n print(\"Huh? That doesn't make any sense.\")" ]
[ "0.6703167", "0.6290751", "0.6279966", "0.6227849", "0.6143075", "0.6136186", "0.6016518", "0.60164726", "0.5974453", "0.5964595", "0.5954566", "0.5888326", "0.5855741", "0.5851218", "0.58509254", "0.5828278", "0.5817811", "0.5815114", "0.5780424", "0.57746065", "0.574305", "0.57356995", "0.5730389", "0.5726466", "0.57132995", "0.5710464", "0.56730163", "0.5669429", "0.5652953", "0.565014", "0.5645715", "0.56263596", "0.56228", "0.56009924", "0.5534652", "0.5516014", "0.55155134", "0.55143", "0.55005896", "0.5465078", "0.54648954", "0.545581", "0.5453507", "0.54521316", "0.5448441", "0.5437692", "0.54250395", "0.5415202", "0.54134923", "0.54004216", "0.5400029", "0.5397759", "0.5390785", "0.5387946", "0.53857154", "0.5384998", "0.5382587", "0.5376124", "0.53760624", "0.53656095", "0.53557736", "0.534954", "0.5332365", "0.53268504", "0.5325836", "0.5320457", "0.5320396", "0.5320396", "0.53096807", "0.53081346", "0.530333", "0.5300637", "0.52974737", "0.5295038", "0.52936727", "0.5291912", "0.5285073", "0.52838576", "0.528096", "0.5275778", "0.52751833", "0.52743584", "0.5272989", "0.52716565", "0.526991", "0.5268776", "0.5267755", "0.5262975", "0.5256544", "0.5256544", "0.52548957", "0.5253646", "0.5248177", "0.5243259", "0.5242945", "0.52417934", "0.52324325", "0.52295595", "0.52273893", "0.5226861" ]
0.57329845
22
Private helper to load SSL certificate from disk
def _load_ssl_certificate(self) -> ssl.SSLContext:
    sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    sslcontext.load_cert_chain(
        path.join(path.dirname(__file__), '..', '..', 'player.crt'),
        path.join(path.dirname(__file__), '..', '..', 'player.key')
    )
    return sslcontext
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ssl_certificate():", "def load_cert_chain(self, certfile, keyfile: Optional[Any] = ...):\n ...", "def _load_ssl(self, ssl_options: tuple):\n try:\n self._ssl.load_cert_chain(certfile=ssl_options[0], keyfile=ssl_options[1], password=ssl_options[2])\n except IOError as e:\n self.logger.error(\"Unable to load certificate files: {}\".format(e))\n self.stop()", "def get_ssl_certificate() :", "def load_certificate(file_path: str, encoding: Encoding = None) -> Certificate:\n real_encoding = encoding or _get_encoding_type(file_path)\n\n def solve(certificate_data: bytes) -> Certificate:\n \"\"\"Determine the type of data and perform loading based on data type.\n\n :param certificate_data: given certificate data\n :return: loaded certificate\n \"\"\"\n return { # type: ignore\n Encoding.PEM: load_pem_x509_certificate,\n Encoding.DER: load_der_x509_certificate\n }[real_encoding](certificate_data, default_backend())\n\n return generic_load(file_path, solve)", "def _try_load_ca_cert(path):\n crt = crypto.load_certificate(crypto.FILETYPE_PEM,\n open(path, 'rb').read())\n if crt.has_expired():\n raise ValueError('CA certificate has expired.')\n if crt.get_signature_algorithm() in ('md5', 'sha1'):\n raise ValueError('CA certificate signed with MD5 or SHA1.')\n return crt", "def cert_file(self):\n return self._get('cert_file')", "def load_pem_x509_certificate(data):\n return _x509.load_pem_x509_certificate(data, _backends.default_backend())", "def load_cert(file, format=FORMAT_PEM):\n bio = BIO.openfile(file)\n if format == FORMAT_PEM:\n return load_cert_bio(bio)\n elif format == FORMAT_DER:\n cptr = m2.d2i_x509(bio._ptr())\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1)\n else:\n raise ValueError(\"Unknown format. Must be either FORMAT_DER or FORMAT_PEM\")", "def load_x509_cert(url, httpc, spec2key, **get_args):\n try:\n r = httpc(\"GET\", url, allow_redirects=True, **get_args)\n if r.status_code == 200:\n cert = str(r.text)\n try:\n public_key = spec2key[cert] # If I've already seen it\n except KeyError:\n public_key = import_public_key_from_pem_data(cert)\n spec2key[cert] = public_key\n\n if isinstance(public_key, rsa.RSAPublicKey):\n return {\"rsa\": public_key}\n elif isinstance(public_key, ec.EllipticCurvePublicKey):\n return {\"ec\": public_key}\n else:\n raise Exception(\"HTTP Get error: %s\" % r.status_code)\n except Exception as err: # not a RSA key\n logger.warning(\"Can't load key: %s\" % err)\n return []", "def load_cert(file):\n with open(file, \"r\") as pemfile:\n cert_content = pemfile.read()\n cert_stripped = \"\".join(\n [line for line in cert_content.splitlines() if \"CERTIFICATE\" not in line])\n\n logging.info('Loaded certificate from {}'.format(file))\n return cert_stripped", "def load(cls, cert_path: Union[Path, str], key_path: Union[Path, str]) -> \"CertificateAuthority\":\n cert_path, key_path = Path(cert_path), Path(key_path)\n\n with cert_path.open(\"rb\") as file:\n cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, file.read())\n\n with key_path.open(\"rb\") as file:\n key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, file.read())\n\n return cls(key, cert)", "def _try_load_ca_private_key(path):\n pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, open(path, 'rb').read())\n if pkey.bits() < 2048:\n raise ValueError(\"I'm sorry Dave, I can't let you use a small \"\n \"RSA key.\")\n pkey.check()\n return pkey", "def GetCurrentCertsFile():\n return _ca_certs_file", "def 
get_certificate_from_file(file_path):\n LOG.debug(\"extracting information of certificate in %s\" % file_path)\n try:\n with open(file_path, 'rb') as file_data:\n file_data.seek(0, os.SEEK_SET)\n read_file = file_data.read()\n certificate = extract_certs_from_pem(read_file)[0]\n except Exception as e:\n LOG.warning(\"No certificate was extracted from file %s\"\n \"due to %s\" % (file_path, e))\n return None\n return certificate", "def fetch_cert_files():\n\n httpd_assets = fetch_unittest_assets_dir('httpd')\n keyfile = os.path.join(httpd_assets, 'test-notprivate-key-pem')\n certfile = os.path.join(httpd_assets, 'test-cert-pem')\n return keyfile, certfile", "def test_use_certificate_file_missing(self, tmpfile):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n ctx.use_certificate_file(tmpfile)", "def svn_client_get_ssl_client_cert_file_provider(svn_auth_provider_object_t_provider, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def load_certificates_bytes_from_file(certificates_file_path: str) -> bytes:\n\n try:\n with open(certificates_file_path, 'rb') as certs_file:\n return certs_file.read()\n except FileNotFoundError:\n raise X509CertificateError(\n 'Certificates file not found: {}'.format(certificates_file_path)\n )\n except Exception as err:\n raise X509CertificateError(\n 'Certificates file could not be read: {}'.format(str(err))\n )", "def get_certificate(self, path: Union[bytes, str]) -> str:\n path = _to_bytes_or_null(path)\n certificate = ffi.new(\"char **\")\n ret = lib.Fapi_GetCertificate(self._ctx, path, certificate)\n _chkrc(ret)\n # certificate is guaranteed to be a null-terminated string\n return ffi.string(_get_dptr(certificate, lib.Fapi_Free)).decode()", "def load_key_and_cert(key_file, cert_file):\n with open(cert_file, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n with open(key_file, 'rb') as f:\n key = serialization.load_pem_private_key(f.read(), None, backend=default_backend())\n\n return key, cert", "def test_use_certificate_file_bytes(self, tmpfile):\n filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())\n self._use_certificate_file_test(filename)", "def _check_ca_certificate(self):\n if not os.path.exists(self._ca_certificate_path):\n with open(self._ca_certificate_path, \"w\") as f:\n f.write(ssl.get_server_certificate((\"127.0.0.1\", self._app_port), ssl_version=ssl.PROTOCOL_TLSv1_2))", "def get_own_cert_as_openssl_object(self):\n# _log.debug(\"get_own_cert_as_openssl_object: node_name={}\".format(self.node_name))\n certpath, cert, certstr = self.get_own_cert()\n return cert", "def test_load_client_ca(self, context, ca_file):\n context.load_client_ca(ca_file)", "def get_certificate(self, cert_id):\r\n return self.ssl.getObject(id=cert_id)", "def __init__(self, cert_string=None, cert_file=None, key_string=None, key_file=None, passphrase=None):\n self._context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)\n\n if cert_file:\n # we have to load certificate for equality check. 
there is no\n # other way to obtain certificate from context.\n with open(cert_file, 'rb') as fp:\n cert_string = fp.read()\n\n cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_string)\n self._context.use_certificate(cert)\n\n if not key_string and not key_file:\n # OpenSSL is smart enought to locate private key in certificate\n args = [OpenSSL.crypto.FILETYPE_PEM, cert_string]\n if passphrase is not None:\n args.append(passphrase)\n\n pk = OpenSSL.crypto.load_privatekey(*args)\n self._context.use_privatekey(pk)\n elif key_file and not passphrase:\n self._context.use_privatekey_file(key_file, OpenSSL.crypto.FILETYPE_PEM)\n\n else:\n if key_file:\n # key file is provided with passphrase. context.use_privatekey_file\n # does not use passphrase, so we have to load the key file manually.\n with open(key_file, 'rb') as fp:\n key_string = fp.read()\n\n args = [OpenSSL.crypto.FILETYPE_PEM, key_string]\n if passphrase is not None:\n args.append(passphrase)\n\n pk = OpenSSL.crypto.load_privatekey(*args)\n self._context.use_privatekey(pk)\n\n # check if we are not passed some garbage\n self._context.check_privatekey()\n\n # used to compare certificates.\n self._equality = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)", "def _use_certificate_file_test(self, certificate_file):\n # TODO\n # Hard to assert anything. But we could set a privatekey then ask\n # OpenSSL if the cert and key agree using check_privatekey. Then as\n # long as check_privatekey works right we're good...\n with open(certificate_file, \"wb\") as pem_file:\n pem_file.write(root_cert_pem)\n\n ctx = Context(SSLv23_METHOD)\n ctx.use_certificate_file(certificate_file)", "def test_use_certificate_chain_file_missing_file(self, tmpfile):\n context = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n context.use_certificate_chain_file(tmpfile)", "def get_tls_factory(self):\n if not access(self.cert_path, R_OK):\n raise RuntimeError('Error: cert file at %s is not '\n 'readable' % self.cert_path)\n if not access(self.key_path, R_OK):\n raise RuntimeError('Error: key file at %s is not '\n 'readable' % self.key_path)\n if not HAVE_PYOPENSSL:\n raise RuntimeError('Error: running with TLS (cert and key) requires'\n ' pyOpenSSL, but it does not appear to be '\n 'installed. 
Please \"pip install pyOpenSSL\".')\n # check certs are readable\n cf = certificateOptionsFromFiles(self.key_path, self.cert_path)\n return cf", "def fetch_cert(source, entry, s3_client):\n if source == \"s3\":\n bucket_and_key = parse_s3_url(entry)\n logger.info(\"...reading s3 source = {}\".format(bucket_and_key))\n pem_cert = s3_client.get_object(\n Bucket=bucket_and_key[\"bucket\"], Key=bucket_and_key[\"key\"]\n )\n pem_cert_body = pem_cert[\"Body\"].read()\n elif source == \"memory\":\n logger.info(\"...reading from memory\")\n pem_cert_body = entry\n else:\n raise ValueError(\n \"Invalid cert entry type {}, \" \"must be one of s3, memory\".format(source)\n )\n\n # Python3 will return a byte string, Python2 will return a string\n if type(pem_cert_body) == bytes:\n pem_cert_body = pem_cert_body.decode(\"utf-8\")\n\n return pem_cert_body", "def load_cert_string(string, format=FORMAT_PEM):\n bio = BIO.MemoryBuffer(string)\n return load_cert_bio(bio, format)", "def getfilehttps(self, url):\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n response = urllib.request.urlopen(url, context=ctx)\n result = response.read()\n return result", "def get_ca_private_key():\n return _try_load_ca_private_key(cfg.ca_private_key_path())", "def svn_client_get_ssl_client_cert_pw_file_provider(svn_auth_provider_object_t_provider, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def tls_certificate(ca):\n interface, _host, _port = _get_conn_data(ANY_INTERFACE_IPV4)\n return ca.issue_cert(ntou(interface))", "def __init__(self, enterprise_cert_file_path):\n self._enterprise_cert_file_path = enterprise_cert_file_path\n self._cert = None\n self._sign_callback = None", "def test_use_certificate(self, ctx_or_conn):\n # TODO\n # Hard to assert anything. But we could set a privatekey then ask\n # OpenSSL if the cert and key agree using check_privatekey. Then as\n # long as check_privatekey works right we're good...\n ctx_or_conn.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )", "def _get_ca_bundle():\n try:\n import certifi\n return certifi.where()\n except ImportError:\n pass", "def initial_setup():\n\n if os.path.exists(cfg.ca_private_key_path()):\n pkey = _try_load_ca_private_key(cfg.ca_private_key_path())\n else:\n pkey = _generate_ca_private_key(cfg.ca_private_key_path())\n\n if os.path.exists(cfg.ca_cert_path()):\n _try_load_ca_cert(cfg.ca_cert_path())\n else:\n _generate_ca_cert(cfg.ca_cert_path(), pkey)", "def _retrieve_crt_path(haproxy_base_dir, listener, primary_cn):\n confs_dir = os.path.abspath(os.path.normpath(haproxy_base_dir))\n confs_path = os.path.join(confs_dir, listener.id)\n if haproxy_base_dir and listener.id:\n if not os.path.isdir(confs_path):\n os.makedirs(confs_path, 0o755)\n return os.path.join(\n confs_path, '{0}.pem'.format(primary_cn))", "def tls_ca_certificate_pem_path(ca):\n with ca.cert_pem.tempfile() as ca_cert_pem:\n yield ca_cert_pem", "def try_as_domain(prog, inp):\n absolute = False\n if ( (inp[:5] == \"live/\" and len(inp) > 5) or\n (inp[:8] == \"archive/\" and len(inp) > 8) ):\n cont = prog.letsencrypt_directory / inp\n else:\n cont = pathlib.Path(inp)\n if cont.is_absolute():\n absolute = True\n # if the file is an absolute path, then we will have already done\n # a check on it before that it's a file; so later on we won't do\n # this check again. 
If the file is _not_ an absolute path, then\n # before we completed the path from the current working direcotry;\n # here, we'll complete it instead from the Let's Encrypt parent\n # directory.\n else:\n cont = prog.letsencrypt_live_directory / inp\n\n try:\n if cont.is_dir():\n files = [ f for f in cont.iterdir() if f.is_file() ]\n if files:\n return files\n raise Except.FunctionError(\n \"no certificates in '{}' found\".format(cont))\n elif not absolute:\n file = try_as_file(str(cont))\n if file:\n return file\n raise Except.FunctionError(\n \"certificate '{}' could not be resolved\".format(inp))\n else:\n raise Except.FunctionError(\n \"certificate '{}' could not be resolved\".format(inp))\n except OSError as ex:\n raise Except.FunctionError(\"resolving file '{}' failed: {}\".format(\n file, ex.strerror.lower() ) )", "def test_load_verify_bytes_cafile(self, tmpfile):\n cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())\n self._load_verify_cafile(cafile)", "def _lazy_read_ca_bundle():\n if len(ROOT_CERTIFICATES_DICT) > 0:\n return\n\n logger = getLogger(__name__)\n try:\n ca_bundle = (os.environ.get('REQUESTS_CA_BUNDLE') or\n os.environ.get('CURL_CA_BUNDLE'))\n if ca_bundle and path.exists(ca_bundle):\n # if the user/application specifies cabundle.\n read_cert_bundle(ca_bundle)\n else:\n import sys\n from botocore.vendored.requests import certs\n if hasattr(certs, '__file__') and \\\n path.exists(certs.__file__) and \\\n path.exists(path.join(\n path.dirname(certs.__file__), 'cacert.pem')):\n # if cacert.pem exists next to certs.py in request pacakage\n ca_bundle = path.join(\n path.dirname(certs.__file__), 'cacert.pem')\n read_cert_bundle(ca_bundle)\n elif hasattr(sys, '_MEIPASS'):\n # if pyinstaller includes cacert.pem\n cabundle_candidates = [\n ['botocore', 'vendored', 'requests', 'cacert.pem'],\n ['requests', 'cacert.pem'],\n ['cacert.pem'],\n ]\n for filename in cabundle_candidates:\n ca_bundle = path.join(sys._MEIPASS, *filename)\n if path.exists(ca_bundle):\n read_cert_bundle(ca_bundle)\n break\n else:\n logger.error('No cabundle file is found in _MEIPASS')\n try:\n import certifi\n read_cert_bundle(certifi.where())\n except:\n logger.debug('no certifi is installed. ignored.')\n\n except Exception as e:\n logger.error('Failed to read ca_bundle: %s', e)\n\n if len(ROOT_CERTIFICATES_DICT) == 0:\n logger.error('No CA bundle file is found in the system. 
'\n 'Set REQUESTS_CA_BUNDLE to the file.')", "def solve(certificate_data: bytes) -> Certificate:\n return { # type: ignore\n Encoding.PEM: load_pem_x509_certificate,\n Encoding.DER: load_der_x509_certificate\n }[real_encoding](certificate_data, default_backend())", "def _get_cert_link(self, cert_name):\n return '%s%s/%s.crt' % (self.ca_dir, CERT_DIR_NAME, cert_name)", "def ca_file(tmpdir):\n key = rsa.generate_private_key(public_exponent=65537, key_size=2048)\n public_key = key.public_key()\n\n builder = x509.CertificateBuilder()\n builder = builder.subject_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n builder = builder.issuer_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n one_day = datetime.timedelta(1, 0, 0)\n builder = builder.not_valid_before(datetime.datetime.today() - one_day)\n builder = builder.not_valid_after(datetime.datetime.today() + one_day)\n builder = builder.serial_number(int(uuid.uuid4()))\n builder = builder.public_key(public_key)\n builder = builder.add_extension(\n x509.BasicConstraints(ca=True, path_length=None),\n critical=True,\n )\n\n certificate = builder.sign(private_key=key, algorithm=hashes.SHA256())\n\n ca_file = tmpdir.join(\"test.pem\")\n ca_file.write_binary(\n certificate.public_bytes(\n encoding=serialization.Encoding.PEM,\n )\n )\n\n return str(ca_file).encode(\"ascii\")", "def test_load_client_ca_unicode(self, context, ca_file):\n pytest.deprecated_call(context.load_client_ca, ca_file.decode(\"ascii\"))", "def load_key():\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n try:\r\n return open(key_dir, \"rb\").read()\r\n except:\r\n return None", "def _load_verify_cafile(self, cafile):\n with open(cafile, \"w\") as fObj:\n fObj.write(root_cert_pem.decode(\"ascii\"))\n\n self._load_verify_locations_test(cafile)", "def test_get_certificate(self):\n chain = _create_certificate_chain()\n [(cakey, cacert), (ikey, icert), (skey, scert)] = chain\n\n context = Context(SSLv23_METHOD)\n context.use_certificate(scert)\n client = Connection(context, None)\n cert = client.get_certificate()\n assert cert is not None\n assert \"Server Certificate\" == cert.get_subject().CN", "def test_load_client_ca_invalid(self, context, tmpdir):\n ca_file = tmpdir.join(\"test.pem\")\n ca_file.write(\"\")\n\n with pytest.raises(Error) as e:\n context.load_client_ca(str(ca_file).encode(\"ascii\"))\n\n assert \"PEM routines\" == e.value.args[0][0][0]", "def load_crl(file):\n f=BIO.openfile(file)\n cptr= m2.x509_crl_read_pem(f.bio_ptr())\n f.close()\n if cptr is None:\n raise X509Error(Err.get_error())\n return CRL(cptr, 1)", "def _generate_ca_cert(path, pkey):\n crt = _make_base_cert(pkey, 5000, socket.gethostname(),\n random.randrange(0, 2**64))\n crt.set_issuer(crt.get_subject())\n crt.sign(pkey, 'sha256')\n\n data = crypto.dump_certificate(crypto.FILETYPE_PEM, crt)\n open(path, 'wb').write(data)", "def get_certificate(self, cert_name, callback=None):\n # TODO: get certificate from DHT (alternative to getting from disk).\n# _log.debug(\"get_certificate:\\n\\tmy_node_name={}\\n\\tcert_name={}\\n\\tcallback={}\".format(self.node_name, cert_name, callback))\n try:\n cert = self.get_certificate_locally(cert_name)\n if cert and callback:\n callback(certstring=cert)\n elif cert:\n return cert\n else:\n try:\n self.node.storage.get_index(['certificate',cert_name],\n cb=CalvinCB(self._get_certificate_from_storage_cb,\n callback=callback))\n except Exception as err:\n 
_log.debug(\"Certificate could not be found in storage, err={}\".format(err))\n raise\n except Exception as err:\n _log.debug(\"Failed searching for certificate locally, cert_name={}, err={}\".format(cert_name, err))", "def __init__(self, proxy_only = False):\n self.key_file = None\n self.cert_file = None\n self.ca_path = None\n self.key_pass = None\n\n path = os.getenv(\"X509_CERT_DIR\", None)\n if path and os.path.exists(path):\n self.ca_path = path\n\n if not self.ca_path:\n path = \"/etc/grid-security/certificates\"\n if os.path.exists(path):\n self.ca_path = path\n\n path = os.getenv(\"X509_USER_PROXY\", None)\n if path and os.path.exists(path):\n self.key_file = self.cert_file = path\n\n if not self.key_file:\n path = os.getenv(\"X509_USER_KEY\", None)\n if path and os.path.exists(path):\n self.key_file = path\n\n if not self.cert_file:\n path = os.getenv(\"X509_USER_CERT\", None)\n if path and os.path.exists(path):\n self.cert_file = path\n\n if not self.key_file:\n path = os.getenv(\"HOME\") + \"/.globus/userkey.pem\"\n if os.path.exists(path):\n self.key_file = path\n\n if not self.cert_file:\n path = os.getenv(\"HOME\") + \"/.globus/usercert.pem\"\n if os.path.exists(path):\n self.cert_file = path\n\n if not self.ca_path or not os.path.exists(self.ca_path):\n raise RuntimeError(\"no certificate directory found\")\n\n if not self.key_file or not os.path.exists(self.key_file):\n raise RuntimeError(\"no certificate private key file found\")\n\n if not self.cert_file or not os.path.exists(self.cert_file):\n raise RuntimeError(\"no certificate public key file found\")\n\n if not proxy_only and self.key_file != self.cert_file:\n self.key_pass = getpass(\"Password for %s: \" % self.key_file)", "def test_get_cert_store(self):\n context = Context(SSLv23_METHOD)\n store = context.get_cert_store()\n assert isinstance(store, X509Store)", "def opensslCmsCertCreate( ownerCertFile ):\n opensslCmdArgs = [ \"openssl\", \"crl2pkcs7\", \"-certfile\", ownerCertFile,\n \"-nocrl\", \"-outform\", \"der\" ]\n ownerCertCmsDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return ownerCertCmsDerBase64", "def from_file(path, encoding='pem'):\n try:\n with open(path, 'r') as f:\n return X509Csr.from_open_file(f, encoding)\n except IOError:\n raise X509CsrError(\"Could not read file %s\" % path)", "def prepare_certificate_file(certificate: str) -> str:\n certificate_file = NamedTemporaryFile(delete=False)\n certificate_path = certificate_file.name\n certificate_file.write(bytes(certificate, 'utf-8'))\n certificate_file.close()\n demisto.debug('Successfully preparing the certificate')\n return certificate_path", "def Certificate(self) -> _n_8_t_0:", "def Certificate(self) -> _n_8_t_0:", "def get_certificate_locally(self, cert_name):\n #TODO: this should be made asynchronous as it reads from filessystem\n _log.debug(\"get_certificate_locally:\\n\\tmy_node_name={}\\n\\tcert_name={}\\n\\t\".format(self.node_name, cert_name))\n if cert_name == self.node_id:\n _log.debug(\"Look for runtimes own certificate {} in {{mine}} folder, err={}\".format(cert_name, err))\n try:\n# certpath = self.get_own_cert_path()\n# self.certificate.truststore_transport.verify_certificate_from_path(certpath)\n# with open(certpath, 'rb') as fd:\n# certstr=fd.read()\n return self.cert_str\n except Exception as err:\n _log.debug(\"Certificate {} is not in {{mine}} folder, return None, err={}\".format(cert_name, err))\n return None\n else:\n if cert_name in self.cert_dict:\n return self.cert_dict[cert_name]\n else:\n try:\n 
_log.debug(\"Look for certificate in others folder, cert_name={}\".format(cert_name))\n # Check if the certificate is in the 'others' folder for runtime my_node_name.\n files = os.listdir(os.path.join(self.runtime_dir, \"others\"))\n matching = [s for s in files if cert_name in s]\n certpath = os.path.join(self.runtime_dir, \"others\", matching[0])\n self.certificate.truststore_transport.verify_certificate_from_path(certpath)\n with open(certpath, 'rb') as fd:\n certstr=fd.read()\n #TODO: some cleaning of self.cert_dict is probably a good idea\n self.cert_dict[cert_name]=certstr\n return certstr\n except Exception as err:\n _log.debug(\"Certificate {} is not in {{others}} folder, return None, err={}\".format(cert_name, err))\n return None", "def _get_cert_path(self, cert_name, serial):\n return '%s%s/%d_%s.crt' % (self.ca_dir, CERT_DIR_NAME, serial,\n cert_name)", "def test_load_verify_invalid_file(self, tmpfile):\n clientContext = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n clientContext.load_verify_locations(tmpfile)", "def load_private(file):\n with open(file, \"rb\") as pemfile:\n key = jwk.JWK.from_pem(pemfile.read())\n\n logging.info('Loaded private key from {}'.format(file))\n return key", "def test_verify_ssl_https_source(file, tmp_path, ssl_webserver, verify_ssl):\n name = tmp_path / \"test_verify_ssl_true.txt\"\n source = ssl_webserver.url(\"this.txt\")\n source_hash = f\"{source}.sha256\"\n\n ret = file.managed(\n str(name),\n source=source,\n source_hash=source_hash,\n verify_ssl=verify_ssl,\n skip_verify=False,\n )\n if verify_ssl is True:\n assert ret.result is False\n assert \"SSL: CERTIFICATE_VERIFY_FAILED\" in ret.comment\n assert not name.exists()\n else:\n if IS_WINDOWS and not os.environ.get(\"GITHUB_ACTIONS_PIPELINE\"):\n pytest.xfail(\n \"This test fails when running from Jenkins but not on the GitHub \"\n \"Actions Pipeline\"\n )\n assert ret.result is True\n assert ret.changes\n # mode, if present is not important for this test\n ret.changes.pop(\"mode\", None)\n assert ret.changes == {\"diff\": \"New file\"}\n assert name.exists()", "def load_from_existing(self, obj):\n self.subject = self.extract_name(obj.subject)\n\n for ext in obj.extensions:\n crit = ext.critical\n extobj = ext.value\n if ext.oid == ExtensionOID.BASIC_CONSTRAINTS:\n if not crit:\n raise InvalidCertificate(\"BASIC_CONSTRAINTS must be critical\")\n self.ca = extobj.ca\n self.path_length = None\n if self.ca:\n self.path_length = extobj.path_length\n elif ext.oid == ExtensionOID.KEY_USAGE:\n if not crit:\n raise InvalidCertificate(\"KEY_USAGE must be critical\")\n self.usage += self.extract_key_usage(extobj)\n elif ext.oid == ExtensionOID.SUBJECT_ALTERNATIVE_NAME:\n self.san = self.extract_gnames(extobj)\n elif ext.oid == ExtensionOID.EXTENDED_KEY_USAGE:\n self.usage += self.extract_xkey_usage(extobj)\n elif ext.oid == ExtensionOID.AUTHORITY_INFORMATION_ACCESS:\n for ad in extobj:\n if not isinstance(ad.access_location, x509.UniformResourceIdentifier):\n InvalidCertificate(\"Unsupported access_location: %s\" % (ad.access_location,))\n url = as_unicode(ad.access_location.value)\n\n if ad.access_method == AuthorityInformationAccessOID.CA_ISSUERS:\n self.issuer_urls.append(url)\n elif ad.access_method == AuthorityInformationAccessOID.OCSP:\n self.ocsp_urls.append(url)\n else:\n raise InvalidCertificate(\"Unsupported access_method: %s\" % (ad.access_method,))\n elif ext.oid == ExtensionOID.CRL_DISTRIBUTION_POINTS:\n for dp in extobj:\n if dp.relative_name:\n raise 
InvalidCertificate(\"DistributionPoint.relative_name not supported\")\n if dp.crl_issuer:\n raise InvalidCertificate(\"DistributionPoint.crl_issuer not supported\")\n if dp.reasons:\n raise InvalidCertificate(\"DistributionPoint.reasons not supported\")\n\n for gn in self.extract_gnames(dp.full_name):\n if gn.startswith('uri:'):\n self.crl_urls.append(gn[4:])\n else:\n raise InvalidCertificate(\"Unsupported DistributionPoint: %s\" % (gn,))\n elif ext.oid == ExtensionOID.NAME_CONSTRAINTS:\n self.permit_subtrees = self.extract_gnames(extobj.permitted_subtrees)\n self.exclude_subtrees = self.extract_gnames(extobj.excluded_subtrees)\n elif ext.oid == ExtensionOID.SUBJECT_KEY_IDENTIFIER:\n pass\n elif ext.oid == ExtensionOID.AUTHORITY_KEY_IDENTIFIER:\n pass\n elif ext.oid == ExtensionOID.OCSP_NO_CHECK:\n self.ocsp_nocheck = True\n elif ext.oid == ExtensionOID.TLS_FEATURE:\n for tls_feature_code in extobj:\n if tls_feature_code == x509.TLSFeatureType.status_request:\n self.ocsp_must_staple = True\n elif tls_feature_code == x509.TLSFeatureType.status_request_v2:\n self.ocsp_must_staple_v2 = True\n else:\n raise InvalidCertificate(\"Unsupported TLSFeature: %r\" % (tls_feature_code,))\n else:\n raise InvalidCertificate(\"Unsupported extension in CSR: %s\" % (ext,))", "async def import_certificate(\n self, certificate_name: str, certificate_bytes: bytes, **kwargs\n ) -> KeyVaultCertificate:\n\n enabled = kwargs.pop(\"enabled\", None)\n policy = kwargs.pop(\"policy\", None)\n\n if enabled is not None:\n attributes = self._models.CertificateAttributes(enabled=enabled)\n else:\n attributes = None\n base64_encoded_certificate = base64.b64encode(certificate_bytes).decode(\"utf-8\")\n\n parameters = self._models.CertificateImportParameters(\n base64_encoded_certificate=base64_encoded_certificate,\n password=kwargs.pop(\"password\", None),\n certificate_policy=policy._to_certificate_policy_bundle() if policy else None,\n certificate_attributes=attributes,\n tags=kwargs.pop(\"tags\", None),\n )\n\n bundle = await self._client.import_certificate(\n vault_base_url=self.vault_url,\n certificate_name=certificate_name,\n parameters=parameters,\n **kwargs\n )\n return KeyVaultCertificate._from_certificate_bundle(certificate_bundle=bundle)", "def test_use_certificate_chain_file_bytes(self, tmpfile):\n self._use_certificate_chain_file_test(\n tmpfile + NON_ASCII.encode(getfilesystemencoding())\n )", "def dcos_ca_bundle():\n resp = sdk_cmd.cluster_request('GET', '/ca/dcos-ca.crt')\n cert = resp.content.decode('ascii')\n assert cert is not None\n return cert", "def get_cert_file(self, bypass_time_validity_check=False):\n file_contents = (\n \"{} {} {}\"\n ).format(self.cert_key_type,\n str(base64.b64encode(self._sign_cert(bypass_time_validity_check)), encoding='ascii'),\n self.public_key_comment)\n return file_contents", "def ssl_get_cert_from_request(request):\r\n certkey = \"SSL_CLIENT_S_DN\" # specify the request.META field to use\r\n\r\n cert = request.META.get(certkey, '')\r\n if not cert:\r\n cert = request.META.get('HTTP_' + certkey, '')\r\n if not cert:\r\n try:\r\n # try the direct apache2 SSL key\r\n cert = request._req.subprocess_env.get(certkey, '')\r\n except Exception:\r\n return ''\r\n\r\n return cert", "def load_cert_bio(bio, format=FORMAT_PEM):\n if format == FORMAT_PEM:\n cptr = m2.x509_read_pem(bio._ptr())\n elif format == FORMAT_DER:\n cptr = m2.d2i_x509(bio._ptr())\n else:\n raise ValueError(\"Unknown format. 
Must be either FORMAT_DER or FORMAT_PEM\")\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1)", "def generate_ssl_object(module, ssl_cafile, ssl_certfile, ssl_keyfile,\n ssl_crlfile=None):\n\n ssl_files = {\n 'cafile': {'path': ssl_cafile, 'is_temp': False},\n 'certfile': {'path': ssl_certfile, 'is_temp': False},\n 'keyfile': {'path': ssl_keyfile, 'is_temp': False},\n 'crlfile': {'path': ssl_crlfile, 'is_temp': False}\n }\n\n for key, value in ssl_files.items():\n if value['path'] is not None:\n # TODO is that condition sufficient?\n if value['path'].startswith(\"-----BEGIN\"):\n # value is a content, need to create a tempfile\n fd, path = tempfile.mkstemp(prefix=key)\n with os.fdopen(fd, 'w') as tmp:\n tmp.write(value['path'])\n ssl_files[key]['path'] = path\n ssl_files[key]['is_temp'] = True\n elif not os.path.exists(os.path.dirname(value['path'])):\n # value is not a content, but path does not exist,\n # fails the module\n module.fail_json(\n msg='\\'%s\\' is not a content and provided path does not '\n 'exist, please check your SSL configuration.' % key\n )\n\n return ssl_files", "def load_cert_der_string(string):\n bio = BIO.MemoryBuffer(string)\n cptr = m2.d2i_x509(bio._ptr())\n if cptr is None:\n raise X509Error(Err.get_error())\n return X509(cptr, _pyfree=1)", "def test_fallback_default_verify_paths(self, monkeypatch):\n context = Context(SSLv23_METHOD)\n monkeypatch.setattr(\n _lib, \"SSL_CTX_set_default_verify_paths\", lambda x: 1\n )\n monkeypatch.setattr(\n SSL,\n \"_CRYPTOGRAPHY_MANYLINUX_CA_FILE\",\n _ffi.string(_lib.X509_get_default_cert_file()),\n )\n monkeypatch.setattr(\n SSL,\n \"_CRYPTOGRAPHY_MANYLINUX_CA_DIR\",\n _ffi.string(_lib.X509_get_default_cert_dir()),\n )\n context.set_default_verify_paths()\n store = context.get_cert_store()\n sk_obj = _lib.X509_STORE_get0_objects(store._store)\n assert sk_obj != _ffi.NULL\n num = _lib.sk_X509_OBJECT_num(sk_obj)\n assert num != 0", "def test_use_certificate_file_wrong_args(self):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n ctx.use_certificate_file(object(), FILETYPE_PEM)\n with pytest.raises(TypeError):\n ctx.use_certificate_file(b\"somefile\", object())\n with pytest.raises(TypeError):\n ctx.use_certificate_file(object(), FILETYPE_PEM)", "def load_private_key(filename):\n\twith open(str(filename) + \"_key.pem\", \"rb\") as key_file:\n\t\treturn serialization.load_pem_private_key(\n\t\tkey_file.read(),\n\t\tpassword=None,\n\t\tbackend=default_backend()\n\t)", "def _parse_certificate(cls, response):\n links = _parse_header_links(response)\n try:\n cert_chain_uri = links[u'up'][u'url']\n except KeyError:\n cert_chain_uri = None\n return (\n response.content()\n .addCallback(\n lambda body: messages.CertificateResource(\n uri=cls._maybe_location(response),\n cert_chain_uri=cert_chain_uri,\n body=body))\n )", "def _load_spec(self, spec_path: str or PosixPath):\n\n if not isinstance(spec_path, (str, PosixPath)):\n raise TypeError('spec_path must be str or PosixPath!')\n elif isinstance(spec_path, str) and spec_path.startswith('http'):\n context = ssl._create_unverified_context()\n if self.login and self.password:\n spec = urlopen_with_auth(spec_path, self.login, self.password, context)\n else:\n spec = urlopen(spec_path, context=context).read() # may throw HTTPError, URLError\n else: # supplied path, not a link\n with open(spec_path, encoding='utf8') as f:\n spec = f.read()\n self.spec = yaml.load(spec, yaml.Loader)", "def test_use_certificate_file_unicode(self, tmpfile):\n 
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII\n self._use_certificate_file_test(filename)", "def tls_certificate_chain_pem_path(tls_certificate):\n with tls_certificate.private_key_and_cert_chain_pem.tempfile() as cert_pem:\n yield cert_pem", "def cert(self):\n return self._cert", "def init_pki():\n global server_keystore\n\n if pki_is_persistent:\n if not Path(pki_dir).is_dir():\n create_pki()\n else:\n print(f'Do nothing, {pki_dir} already exists')\n else:\n if Path(pki_dir).is_dir():\n shutil.rmtree(pki_dir)\n create_pki()\n with open(server_key_files[\"ca\"]) as crt:\n server_keystore[\"ca\"] = crt.read()\n crt.close()", "def get_system_ca_file():\n # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,\n # Suse, FreeBSD/OpenBSD\n ca_path = ['/etc/ssl/certs/ca-certificates.crt',\n '/etc/pki/tls/certs/ca-bundle.crt',\n '/etc/ssl/ca-bundle.pem',\n '/etc/ssl/cert.pem']\n for ca in ca_path:\n if os.path.exists(ca):\n return ca\n return None", "def get_authentication_certificate(hostname:str) -> str:\r\n host = hostname.split(\":\")[0]\r\n port = int(hostname.split(\":\")[1] or 443)\r\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\r\n sock = context.wrap_socket(conn, server_hostname=host)\r\n sock.connect((host, port))\r\n cert = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))\r\n return str.encode(cert)", "def get_own_cert(self):\n# _log.debug(\"get_own_cert: node_name={}\".format(self.node_name))\n try:\n certpath = self.get_own_cert_path()\n st_cert = open(certpath, 'rt').read()\n cert_part = st_cert.split(BEGIN_CRT_LINE)\n certstr = \"{}{}\".format(BEGIN_CRT_LINE, cert_part[1])\n cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,\n certstr)\n _log.debug(\"get_own_cert\"\n \"\\n\\tcertpath={}\".format(certpath))\n #Check that the certificate parameters are the same as our attributes\n if not certificate.cert_O(certstring=certstr) == self.domain:\n _log.error(\"Domain does not match certificate\")\n raise Exception(\"Domain does not match certificate\")\n if not certificate.cert_CN(certstring=certstr) == self.node_name:\n _log.error(\"Node name does not match certificate\")\n raise Exception(\"Node name does not match certificate\")\n if not certificate.cert_DN_Qualifier(certstring=certstr) == self.node_id:\n _log.error(\"Node ID does not match certificate\")\n raise Exception(\"Node ID does not match certificate\")\n return certpath, cert, certstr\n except Exception as err:\n # Certificate not available\n _log.debug(\"No runtime certificate can be found, err={}\".format(err))\n return None, None, None", "def tls_certificate_private_key_pem_path(tls_certificate):\n with tls_certificate.private_key_pem.tempfile() as cert_key_pem:\n yield cert_key_pem", "def create_ssl_context(cert, key, **kwargs):\n if hasattr(ssl, 'SSLContext'):\n ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER, **kwargs)\n ctx.load_cert_chain(cert, key)\n return ctx\n\n if isinstance(cert, str):\n with open(cert, 'rb') as f:\n cert = f.read()\n if isinstance(key, str):\n with open(key, 'rb') as f:\n key = f.read()\n\n class FakeSSLSocket:\n def __init__(self, sock, **kwargs):\n self.sock = sock\n self.kwargs = kwargs\n\n def accept(self):\n client, addr = self.sock.accept()\n return (ssl.wrap_socket(client, cert=cert, key=key, **self.kwargs),\n addr)\n\n def close(self):\n self.sock.close()\n\n class FakeSSLContext:\n def __init__(self, **kwargs):\n self.kwargs = kwargs\n\n def wrap_socket(self, sock, **kwargs):\n 
all_kwargs = self.kwargs.copy()\n all_kwargs.update(kwargs)\n return FakeSSLSocket(sock, **all_kwargs)\n\n return FakeSSLContext(**kwargs)", "def import_public_key_from_cert_file(filename):\n with open(filename, \"rb\") as key_file:\n cert = x509.load_pem_x509_certificate(key_file.read(), backend=default_backend())\n return cert.public_key()", "def get_certinfo(doc):\n\n #set a two second default timeout to recieve a cert\n socket.setdefaulttimeout(2)\n doc['ssl'] = {} \n\n try:\n cert = ssl.get_server_certificate((doc['hostname'], 443))\n #sometimes certs come back as unicode so cast to str() aka ascii\n cert = M2Crypto.X509.load_cert_string(str(cert))\n\n except:\n syslog.syslog('[*] Failed to get ssl certificate from %s' % doc['hostname'])\n print('[*] Failed to get ssl certificate from %s' % doc['hostname'])\n #lets remove the ssl key and return the doc untouched\n doc.pop('ssl')\n return doc\n\n\n #get creation date\n doc['ssl']['created'] = cert.get_not_before().get_datetime().isoformat()\n #get not valid after, aka expiration data\n doc['ssl']['expire'] = cert.get_not_after().get_datetime().isoformat()\n #get issuer information\n doc['ssl']['issuer'] = cert.get_issuer().as_text()\n #get subject information\n doc['ssl']['subject'] = cert.get_subject().as_text()\n #get keysize, size() returns in bytes, so we multiply * 8 to get the number of bits\n doc['ssl']['keysize'] = cert.get_pubkey().size() * 8\n #get cert fingerprint for comparison\n doc['ssl']['fingerprint'] = cert.get_fingerprint()\n\n return doc", "def load_key():\n return open(\"pass.key\", \"rb\").read()", "def get_ssl_context() -> ssl.SSLContext | None:\n if (ca_folder := os.getenv('ref_ca')) is None: # noqa: SIM112\n return None\n return mk_ssl_context_from_folder(ca_folder,\n private_key='user_private_key_encrypted.pem',\n certificate='user_certificate_root_signed.pem',\n ca_public_key='root_certificate.pem',\n cyphers_file=None,\n ssl_passwd=os.getenv('ref_ssl_passwd')) # noqa: SIM112", "def load_key(self, pemfile_path_abs: str, set_priv=False) -> None:\n return None", "def _get_ssl_context(self):\n context = ssl.SSLContext(self.TLS_VERSION)\n context.load_cert_chain(self.ssl_cert, self.ssl_key)\n return context", "def get_ssl_ca_settings():\n ca_data = {}\n https_service_endpoints = config('https-service-endpoints')\n if (https_service_endpoints and\n bool_from_string(https_service_endpoints)):\n # Pass CA cert as client will need it to\n # verify https connections\n ca = get_ca(user=SSH_USER)\n ca_bundle = ca.get_ca_bundle()\n ca_data['https_keystone'] = 'True'\n ca_data['ca_cert'] = b64encode(ca_bundle)\n return ca_data", "def fetch_x509_context(self) -> X509Context:", "def test_load_tmp_dh_missing_file(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n context.load_tmp_dh(b\"hello\")" ]
[ "0.7039763", "0.70248497", "0.69557095", "0.69070625", "0.6604512", "0.6576468", "0.6486132", "0.6481465", "0.6431241", "0.6423316", "0.6407451", "0.63593185", "0.6340354", "0.61137533", "0.60513645", "0.6047259", "0.59898525", "0.59736806", "0.59400916", "0.5930687", "0.5902788", "0.58989924", "0.5859162", "0.5857387", "0.58547103", "0.5854598", "0.584414", "0.5842439", "0.58400023", "0.58119136", "0.57987404", "0.5795956", "0.5773791", "0.57249993", "0.5719006", "0.571603", "0.56976914", "0.5696759", "0.5671121", "0.5652397", "0.56455183", "0.5643211", "0.5627337", "0.55655473", "0.5541669", "0.5537286", "0.5537142", "0.5536612", "0.55182415", "0.5505451", "0.5484574", "0.54755116", "0.54735786", "0.54676425", "0.5467544", "0.5451464", "0.5449899", "0.54469705", "0.54453826", "0.5441565", "0.54265624", "0.54210746", "0.54210746", "0.541695", "0.5400797", "0.5394894", "0.53783166", "0.53755176", "0.5374711", "0.5374216", "0.5368375", "0.53647673", "0.5360441", "0.53595465", "0.53558236", "0.53529316", "0.53491515", "0.53336656", "0.5321161", "0.5314528", "0.5313407", "0.5312417", "0.5312069", "0.5303294", "0.5303077", "0.52995116", "0.5295946", "0.529218", "0.52900356", "0.5287189", "0.52831364", "0.52818465", "0.5279893", "0.5275465", "0.5274832", "0.5261827", "0.52394027", "0.52367276", "0.5234273", "0.52307224" ]
0.67338187
4
Constructor with data and next element
def __init__(self, data):
    self.data = data
    self.next = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, data, next=None):\n self.data = data\n self.next = next", "def __init__(self, data, next = None):\n\t\tself.data = data\n\t\tself.next = next", "def __init__(self, data=None, next=None):\n self.data = data\n self.next = next", "def __init__(self, data, next_node = None):\n self.data = data\n self.next_node = next_node", "def __init__(self, data, next_node=None):\n self.data = data\n self.next_node = next_node", "def __init__(self, data, next_node=None):\n self.data = data\n self.next_node = next_node", "def __init__(self, data):\n\n self.next = None\n \"\"\" Next text \"\"\"\n\n self.data = data\n \"\"\" Data text \"\"\"", "def __init__(self, data, next_node=None): #self.next_node ??\n self.data = data\n self.next_node = next_node", "def __init__(self, data):\n\n self.data = data\n self.next = None\n self.prev = None", "def __init__(self, data):\n self.data = data\n self.next = None", "def __init__(self, data=None, next=None):\r\n pass", "def __init__(self, data):\n self.__data = data\n self.__next = None", "def __init__(self, data=None, next_node=None, previous_node=None):\n self.data = data\n self.previous_node = previous_node\n self.next_node = next_node", "def __init__(self, data=None, next_node=None, prev_node=None):\n self._data = data\n self._prev = prev_node\n self._next = next_node", "def __init__(self, data, prev, nxt): \n self.data = data\n self.prev = prev\n self.nxt = nxt", "def __init__(self, element, previous, next):\n self._element = element\n self._previous = previous\n self._next = next", "def __init__(self, data, previous = None, next = None):\n\t\tNode.__init__(self, data, next)\n\t\tself.previous = previous", "def __init__(self, data, next_node=None):\n\n if type(data) is not int:\n raise TypeError(\"data must be an integer\")\n else:\n self.data = data\n if isinstance(next_node, Node) or next_node is None:\n self.next_node = next_node\n else:\n raise TypeError(\"next_node must be a Node object\")", "def __init__(self, value, next_node=None):\n self.value = value # element at the node\n self.next_node = next_node # reference to next node", "def __init__(self, value, next=None):\n self.value = value\n self.next = next", "def __init__(self, item, next = None):\n self.item = item\n self.next = next", "def __init__(self, data=None):\n self.data = data\n # initializing an empty node that has no next nor prior node\n self.next = self.prior = None", "def __init__(self, data=None):\n self.head = None\n self.tail = None\n if data is not None:\n try:\n for item in data:\n if item is data[0]:\n self.head = Node(item, next=None)\n self.tail = self.head\n else:\n self.head = Node(item, self.head)\n except TypeError:\n node = Node(data, next=None)\n self.head = node\n self.tail = self.head", "def __init__(self, data=None):\n self.head = None \n if data is not None:\n for value in data:\n self.append(value)", "def __init__(self, base, next=None):\n self.base = base\n self.next = next or dict()", "def __init__(self, value, next=None):\n self.value = value # element at the node\n self.next = next # reference to next node in the LinkedList", "def __init__(self, data=None):\n self.head = None\n self.tail = None\n if data is not None:\n for value in data:\n self.append(value)", "def __init__(self, iterator):\n self.iterator = iterator\n self.has_next = False\n self.next_val = None\n if self.iterator.hasNext():\n self.has_next = True\n self.next_val = self.iterator.next()", "def __init__(self, data, node):\n self.data = data\n self.node = node", "def __init__(self, 
iterator):\n self.iter = iterator\n self.tmpNext = iterator.next() if iterator.hasNext() else None", "def __init__(self, next_page_token=None, data=None): # noqa: E501\n self.openapi_types = {\n 'next_page_token': str,\n 'data': List\n }\n\n self.attribute_map = {\n 'next_page_token': 'next_page_token',\n 'data': 'data'\n }\n\n self._next_page_token = next_page_token\n self._data = data", "def __init__(self, word):\n self.data = word\n self.next = None\n self.count = 1", "def __init__(self, cargo=None, next=None):\n self.__cargo = cargo\n self.__next = next", "def __init__(self, cargo=None, next=None):\r\n self.__cargo = cargo\r\n self.__next = next", "def __init__(self, value, prev=None, next=None):\n\n self.prev = prev # the node before this one — defaults to None\n self.value = value # the value to store\n self.next = next # the node after this one — defaults to None", "def data_next(self, *args, **kwargs):\n # there is this nasty tradeoff where if you implement this in this way\n # where data can take arguments, then _any_ downstream artifact that you\n # want also has to take those arguments as well, clearly undesireable\n # in cases where you would like to be able to do the transformation\n # without having to haul a bunch of stuff around with you\n # what this means is that either you have to accept a set of defaults that\n # are sane and will get you what you want, you identifier is incomplete and\n # thus you add arguments to your function to flesh it out, or\n # you have to drop down a level, configure your argument ahead of time\n # and then make the request again with slightly differen types\n\n # allowing the underlying abstraction to bubble up into optional kwarsg\n # frankly seems like a pretty good option, if it werent for the fact that\n # it is an absolute pain to maintain in the absense of mimicArgs\n # I feel like cl generics could make this much easier ...\n\n # OR OR OR the graph is successor stream of the actual instantiation of this stream\n # which means that ... the extra args would go in at init time??? no\n # that doesn't seem like the right tradeoff, any successor streams\n # basically have to present kwargs for any variables that cannot be\n # set to a sane default within the scope of the identifier system (sigh)\n # or at least in cases where it hasn't been demostrated that the variables\n # are simply a matter of representaiton, not differences in information\n # (i.e. that there isn't a function that can 1:1 interconvert)\n\n generator = self.metadata().data_next(yield_response_gen=True, **kwargs)\n format, *header_chunks, (resp, gen) = generator\n self.headers = resp.headers\n self.format = format\n # TODO populate header graph? 
not sure this is actually possible\n # maybe need to double wrap so that the header chunks always get\n # consumbed by the header object ?\n if self.format == 'application/rdf+xml':\n resp.close()\n return None\n\n return chain(header_chunks, gen)", "def __init__(self, key, value, prev=None, next=None):\n self.key = key\n self.value = value\n self.prev = prev\n self.next = next", "def __init__(self, sequence):\n self._seq = sequence # Copy of the given data.\n # Reference to the underlying data, will increment to 0 on first call\n # to next element.\n self._k = -1", "def __init__(self, data=None):\n if data is not None:\n self._size = 1\n self.head = Node(data)\n self.tail = self.head\n else:\n self._size = 0\n self.head = None\n self.tail = None", "def __init__(self, val, next=None):\n self.val = val\n self._next = next\n if val is None:\n raise TypeError('Must pass a value')", "def __init__(self, init):\n self.stepforward = int(init)\n self.data = Linkedlist()", "def __init__(self, payload=None, next_node=None):\n self.__payload = payload\n self.__nextListNode = next_node", "def __init__(self, item):\r\n self.item = item\r\n self.next = None # Initially pointing to nothing\r", "def __init__(self, value: int):\n self._data = value\n self._next = None", "def create(data, next=None):\n return {'data': data, 'next': next}", "def __init__(self):\n\n self._header = self._Node(None, None, None)\n self._trailer = self._Node(None, None, None)\n self._header._next = self._trailer # trailer is the next node after header\n self._trailer._prev = self._header # header is the node before trailer\n self._size = 0 # keep track of the number of elements", "def __init__(self, value, back=None, front=None ):\n self.previous = back\n self.value = value\n self.next = front", "def __init__(self):\n\t\tself.current = None\n\t\tself.head = None", "def __init__(self, data=None):\n self.head = None # Node\n self.tail = None # Node\n self.size = 0 # Integer\n\n if data:\n [self.push_back(i) for i in data]", "def __init__(self, d, n=None):\n self.data = d\n self.next_node = n\n # adding a hash value for the data\n self.hash = self.generate_hash()\n return None", "def __init__(self, iterator):\n self.iterator = []\n while iterator.hasNext():\n self.iterator.append(iterator.next())", "def __init__(self, items):\n if len(items) == 0:\n self._first = None\n self._rest = None\n else:\n self._first = items[0]\n self._rest = LinkedListRec(items[1:])", "def __init__(self, iterator):\n self.iterator, self.p = iterator, None", "def __init__(self, val):\n self.val = val\n self.next = None", "def __init__(self):\n self.node = None\n self.data = None", "def __init__(self, iterator):\n self.iterator = iterator\n self.peek_num = None", "def __init__(self, data=[]):\n self.data = data\n self.count = 0\n self.value = ''", "def __init__(self, data=b''):\n self.data = data\n self.offset = 0", "def __init__(self):\n self.head = None\n self.tail = None", "def __init__(self):\n self.head = None\n self.tail = None", "def __init__(self, l):\n self.l = l\n self.next = None\n self.prev = None\n self.prev_n = -1\n self.next_n = -1", "def __init__(self):\n self.head = None\n self.tail = self.head", "def __init__(self, iterator):\n super().__init__(iterator,\n join=lambda x: x, empty=lambda x: [],\n init=lambda content, index: content)", "def __init__(self, data):\n self.data = data\n return", "def __init__(self):\n\n self.head = None", "def __init__(self):\n\n self.head = None", "def __init__(self):\n\n self.head = None", "def __init__(self, 
data):\n self._data = data\n self._parent = None\n self._rkid = None\n self._lkid = None", "def __init__(self):\n self.head = None\n self.length = 0", "def __init__(self):\n self.head = None\n self.length = 0", "def next(self):\n raise NotImplementedError", "def __init__(self, data, node):\n self.data = data\n self.node = node # This is the data structure which holds the data for this node, e.g. lat, lon, etc.", "def __init__(self, data):\n self.data = self.get_api_reference_html(data)\n self.parsed_data = []", "def next(self):\r\n pass", "def __init__(self, key, value):\n self.key = key\n self.value = value\n self.next = None", "def __init__(self):\n\t\tself.state = None\n\t\tself.info = None\n\t\tself.next = None", "def setNext(self, next):\n\t\t\tself.next = next", "def __init__(self, data):\n self.data = data", "def __init__(self, data):\n self.data = data", "def __init__(self, data):\n self.data = data", "def __init__(self, data):\n self.data = data", "def __init__(self, key: str, value: object) -> None:\n self.next = None\n self.key = key\n self.value = value", "def __init__(self, key: str, value: object) -> None:\n self.next = None\n self.key = key\n self.value = value", "def __init__(self):\n self.__head = None", "def __init__(self):\n self.__head = None", "def __init__(self):\n\n self.__head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None" ]
[ "0.8599574", "0.85121983", "0.84967536", "0.8359982", "0.8315263", "0.8315263", "0.81818867", "0.8087428", "0.80368114", "0.8025065", "0.8019018", "0.7919946", "0.76897675", "0.7610863", "0.7575822", "0.75584215", "0.7526036", "0.7419128", "0.72142625", "0.7198809", "0.7086519", "0.7025634", "0.6922162", "0.6886401", "0.6788423", "0.67782843", "0.6757047", "0.67059803", "0.66628885", "0.65939945", "0.6551229", "0.6488291", "0.64133114", "0.64126194", "0.63148534", "0.62754786", "0.6260708", "0.6235648", "0.62264067", "0.6224455", "0.62216127", "0.6214615", "0.6212008", "0.61728245", "0.61302227", "0.61260927", "0.6080604", "0.60651165", "0.60583484", "0.60505277", "0.6041108", "0.6028746", "0.5994333", "0.5971437", "0.5962069", "0.59589714", "0.5950874", "0.59049124", "0.58976007", "0.58976007", "0.58936256", "0.58927584", "0.58748", "0.58738697", "0.58733356", "0.58733356", "0.58733356", "0.58676374", "0.5865233", "0.5865233", "0.58583766", "0.58543795", "0.5853191", "0.5832898", "0.58311105", "0.58298266", "0.5821664", "0.58186555", "0.58186555", "0.58186555", "0.58186555", "0.58169025", "0.58169025", "0.5810387", "0.5810387", "0.5809967", "0.58088195", "0.58088195", "0.58088195", "0.58088195", "0.58088195", "0.58088195", "0.58088195", "0.58088195", "0.58088195", "0.58088195", "0.58088195", "0.58088195", "0.58088195" ]
0.8276122
7
Representation of the linked list
def __repr__(self):
    return "LinkedList([{}],{}/{})".format(self.cur_node, self.cur_pos, self.length)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n\n return \"LinkedList created\"", "def __repr__(self):\r\n return \"ListNode({})\".format(self.data)", "def __repr__(self):\n return 'LinkedList({!r})'.format(self.items())", "def __repr__(self):\n return \"{}\".format(self._head)", "def __init__(self):\n self.head = ListNode()", "def __init__(self):\n node = ListNode(0) # dummy\n self.head = node\n self.tail = node\n self.len = 0", "def simple_ll():\n ll = LinkedList()\n ll.push(20)\n ll.push(4)\n ll.push(15)\n ll.push(85)\n return ll", "def __init__(self):\n\n self.head = linkNode()\n self.tail = None\n # print(self.head.val)", "def __init__(self, head: ListNode):\n self.head = head\n self.list = []\n while head:\n self.list.append(head.val)\n head = head.next", "def __init__(self, linked_list: object):\n self.current_node = linked_list._head", "def __repr__(self):\n return '<List %r>' % (self.name)", "def __init__(self, head: ListNode):\n self.l = []\n while head:\n self.l.append(head.val)\n head = head.next", "def l1():\n head = l1 = ListNode(3)\n l1.next = ListNode(4)\n l1.next.next = ListNode(5)\n return head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self):\n self.head = None\n self.tail = None\n self.current_node = None", "def __init__(self, lst=[]):\r\n self.__length = 0 # current length of the linked list\r\n self.__head = None # pointer to the first node in the list\r\n self.__last = None # pointer to the last node in the list\r\n lst.reverse() # reverse to ensure elements will appear in same order\r\n for e in lst: # add elements of input list lst one by one\r\n self.add(e)", "def __repr__(self):\n\n nodes = []\n current = self.head\n\n while current:\n if current is self.head:\n nodes.append('[Head: %s]' % current.data)\n elif current.next_node is None:\n nodes.append('[Tail: %s]' % current.data)\n else:\n nodes.append('[%s]' % current.data)\n current = current.next_node\n\n return '-> '.join(nodes)", "def get_list_node(self):\n return self.list_node", "def __init__(self):\n self.head = None\n self.tail = None", "def __init__(self):\n self.head = None\n self.tail = None", "def __init__(self):\n self.head = None\n self.tail = self.head", "def __init__(self):\n self.head = None\n self.length = 0", "def __init__(self):\n self.head = None\n self.length = 0", "def __init__(self, head: ListNode):\n self.nodes = []\n\n while(head):\n self.nodes.append(head)\n head = head.next", "def __init__(self):\n self.head = None\n self.tail = None\n self.size = 0", "def __init__(self):\n\t\tself.current = None\n\t\tself.head = None", "def __init__(self, lst=[]):\n self.__length = 0 # current length of the linked list\n self.__head = None # pointer to the first node in the list\n for e in lst: # initialize the list,\n self.add(e) # by adding elements one by one", "def __init__(self, l):\n self.l = l\n self.next = None\n self.prev = None\n self.prev_n = -1\n self.next_n = -1", "def __init__(self):\n\n self.head = None\n self.tail = None\n self.size = 0", "def __init__(self):\n self.head = None\n self.size = 0", "def __init__(self):\n self.head = None\n self.size = 0", "def __init__(self):\n self.head = None\n self.size = 0", "def __repr__(self):\n nodes = []\n curr = self.head\n while curr:\n nodes.append(repr(curr))\n curr = curr.next\n return '[' + ', 
'.join(nodes) + ']'", "def __repr__(self):\n nodes = []\n curr = self.head\n while curr:\n nodes.append(repr(curr))\n curr = curr.next\n return '[' + ', '.join(nodes) + ']'", "def __init__(self):\n\t\tself.head = None\n\t\tself.tail = None", "def __init__(self):\n\n self.head = None\n self.node_count = 0", "def __repr__(self):\n nodes = []\n current = self.head\n while current:\n nodes.append(repr(current))\n current = current.next\n\n return '[' + ','.join(nodes) + ']'", "def __init__(self, head):\n self.head = head\n self.length = 0\n node = head\n while node:\n node = node.next\n self.length += 1", "def __init__(self, head: ListNode):\n self.head = head\n temp = head\n i = 0\n while temp is not None:\n i+=1\n temp = temp.next\n self.len = i # 找到list的长度", "def __init__(self):\r\n self._head = None\r\n self._tail = None\r\n self._size = 0", "def lstToLinkedList(lst):\n if not lst: return\n LinkedList = Node(lst[0])\n LinkedList.next = lstToLinkedList(lst[1:])\n return LinkedList", "def show(self):\n if self.empty():\n return \"Linked List is Empty\"\n\n l = self.head\n while l is not None:\n print(l.data, end=\" ----> \")\n l = l.next\n print()\n return", "def __init__(self):\n self._head = None\n self._tail = None\n self._size = 0", "def __init__(self):\n self._head = None\n self._tail = None\n self._size = 0", "def __init__(self):\n self._head = None\n self._tail = None\n self._size = 0", "def __init__(self):\n self.length = 0\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def __init__(self):\n self.head = None", "def l2():\n head = l2 = ListNode(2)\n l2.next = ListNode(4)\n l2.next.next = ListNode(5)\n return head", "def to_string(self):\n try:\n items = \" \"\n current = self.head\n while current:\n items += f\"{ {current.value} }->\"\n current=current.next\n items+=\"NULL\"\n print (items)\n return items\n # items.append(current.value)\n # current = current.next\n # print(''.join(f\"{ {k[1]} }->\" for k in enumerate(items))+'NULL')\n # return(''.join(f\"{ {k[1]} }->\" for k in enumerate(items))+'NULL')\n except Exception as error:\n print (f\"There is error in __init__ of LinkedList, the error {error}\")", "def __init__(self):\n\n self.head = None", "def __init__(self):\n\n self.head = None", "def __init__(self):\n\n self.head = None", "def __init__(self):\n self._head = self._tail = None\n self._size = 0", "def __init__(self):\n self.head = None\n self.tail = None\n self.count = 0", "def __init__(self, head):\n self.head = head", "def __init__(self, head):\n self.head = head", "def __init__(self, head):\n self.head = head", "def __init__(self, head):\n self.head = head", "def __init__(self, value, next=None):\n self.value = value # element at the node\n self.next = next # reference to next node in the LinkedList", "def __init__(self):\n self.dummy = ListNode(-1)\n self.cnt = 0", "def list_print(self):\n node = self.cur_node # cant point to ll!\n while node:\n print(node.data)\n node = node.next", "def __to_list__(self):\r\n out = []\r\n node = self.head\r\n while node:\r\n out.append(node.value)\r\n node = 
node.next\r\n return out", "def create_linked_list(input_list):\n head=None\n for value in input_list:\n if head is None:\n head=Node(value)\n else:\n current_node=head\n while current_node.next:\n current_node=current_node.next\n current_node.next=Node(value)\n# printlist(head)\n# print('------')\n return head", "def __init__(self):\n\t\tself._head = None\n\t\tself._tail = None\n\t\tself._size = 0", "def printList(self): \r\n aux = self.head \r\n while(aux): \r\n print(aux.data , end = ' ') \r\n aux = aux.next", "def print_list(self):\n\n current = self.head\n\n while current is not None:\n print current.data\n current = current.next", "def __init__(self):\r\n self.head = None", "def __init__(self):\n self.__head = None", "def __init__(self):\n self.__head = None", "def __init__(self):\n self.size = 0\n self.head, self.tail = Node(0), Node(0)\n self.head.next = self.tail\n self.tail.prev = self.head", "def __repr__(self):\n temp_node = self.head\n values = []\n if temp_node is None:\n return str([])\n while temp_node is not None:\n values.append(temp_node.value)\n temp_node = temp_node.next_node\n return str(values)", "def __init__(self):\n\n self.__head = None", "def test_lined_list_create_with_non_iterable():\n from linked_list import Linked_List\n new_linked_list = Linked_List(-100)\n assert new_linked_list.head.value == -100", "def __str__(self):\n\n return self._fold_loop(lambda x, y: y + \"%s -> \" % x, \"LinkedList [\",\\\n self._head) + \"None]\"", "def print_list(self):\n p = self.head\n i = 0\n\n while i < self.size():\n print(p.data)\n i += 1\n p = p.next_node", "def __str__(self):\n temp = \"head\"\n temp_node = self.head\n while temp_node is not None:\n temp += f' -> {temp_node.val}'\n temp_node = temp_node.next\n temp += f'-> None'\n return temp", "def show(self):\n\n traverse = self.head\n\n if self.head == None:\n print(\"Linked List is empty\")\n return\n\n while traverse.next != None:\n print(traverse.data)\n traverse = traverse.next\n\n print(traverse.data)", "def __init__(self, capacity: int):\n self.cpty = capacity\n self.htab = dict() #hash table \n self.head = ListNode() #doubly linked list\n self.tail = ListNode()\n self.head.next = self.tail\n self.tail.prev = self.head", "def display_content(self):\n list = []\n traverse = self.head\n\n if self.head == None:\n # print(\"Linked List is empty\")\n return\n\n while traverse.next != None:\n list.append(traverse.data)\n traverse = traverse.next\n\n list.append(traverse.data)\n return list", "def linked_node(self):\n return self._linked_node", "def __str__(self) -> str:\n content = ''\n if self.head is not None:\n content = str(self.head)\n cur = self.head.next\n while cur is not None:\n content += ' -> ' + str(cur)\n cur = cur.next\n return 'SLL [' + content + ']'", "def __str__(self) -> str:\n content = ''\n if self.head is not None:\n content = str(self.head)\n cur = self.head.next\n while cur is not None:\n content += ' -> ' + str(cur)\n cur = cur.next\n return 'SLL [' + content + ']'", "def print_list(self):\r\n head = self\r\n tail = self.__next # go to my next node\r\n if tail is not None: # as long as the end of the list has not been reached\r\n print(head, end=\" \") # print my head\r\n tail.print_list() # recursively print remainder of the list\r\n else: # print the last element\r\n print(head, end=\" \")", "def lstToLinkedList(lst):\n if not lst: return\n LinkedList = Node(lst[0])\n LinkedList.next = lstToLinkedList(lst[1:])\n return LinkedList" ]
[ "0.77007776", "0.76717454", "0.7663137", "0.7242428", "0.72094405", "0.7121683", "0.7095792", "0.6991777", "0.6974106", "0.69680715", "0.69224", "0.68925244", "0.6854505", "0.6827989", "0.6827989", "0.6827989", "0.6827989", "0.6827989", "0.6827928", "0.67615014", "0.6742917", "0.67423606", "0.67416453", "0.67416453", "0.67233217", "0.667861", "0.667861", "0.66432977", "0.6643191", "0.66429615", "0.6629068", "0.6624825", "0.6616974", "0.66139877", "0.66139877", "0.66139877", "0.66138905", "0.66138905", "0.66132426", "0.65733165", "0.6563024", "0.65398914", "0.6524529", "0.6516472", "0.65088093", "0.6469326", "0.6462727", "0.6462727", "0.6462727", "0.6452428", "0.6437798", "0.6437798", "0.6437798", "0.6437798", "0.6437798", "0.6437798", "0.6437798", "0.6437798", "0.6437798", "0.6437798", "0.6437798", "0.6437798", "0.6437798", "0.6437466", "0.64362365", "0.6430643", "0.6430643", "0.6430643", "0.64305466", "0.64245147", "0.6421487", "0.6421487", "0.6421487", "0.6421487", "0.6410606", "0.6406499", "0.6400552", "0.63826275", "0.6374771", "0.63632786", "0.63576967", "0.63572323", "0.6356113", "0.63550454", "0.63550454", "0.63396865", "0.6326046", "0.63229114", "0.63143", "0.6311438", "0.63089514", "0.62835187", "0.6275611", "0.6271336", "0.62696075", "0.62485653", "0.6248414", "0.6248414", "0.62483525", "0.6235655" ]
0.76858366
1
Insert a new node into the linked list
def add_node(self, data):
    new_node = Node(data)
    if self.cur_node is not None:
        new_node.next, self.cur_node.next = self.cur_node.next, new_node
    self.cur_node = new_node
    self.length += 1
    self.cur_pos += 1
    if self.start_node is None:
        self.start_node = self.cur_node
    # print("Node({}) added to {}".format(new_node.data, self.cur_pos-1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_node(self, head, node):\n prev, curr = None, head\n while curr.val < node.val:\n prev, curr = curr, curr.next\n if not prev:\n head = node\n else:\n prev.next = node\n node.next = curr\n return head", "def insert(self, data):\n if self.head == None:\n self.head = Node(data)\n else:\n curr = self.head\n while curr.link != None:\n curr = curr.link\n curr.link = Node(data)", "def __insert(self, node, value):\n #if DEBUG: print('\\t__insert({})'.format(value))\n\n new = Node(value, node.next)\n node.next = new\n return new", "def insert(self, val):\n new_node = Node(val)\n new_node.next = self.head\n self.head = new_node", "def insert(self, value):\n self.head = Node(value, self.head)", "def insert(self, value):\n\n # create new node\n\n # self.head =new_node\n current = self.head\n if current == None:\n self.head = Node(value, self.head)\n return\n while current.next != None:\n current=current.next\n new_node=Node(value)\n\n current.next=new_node", "def insert(self, data):\n new_node = Item(data)\n new_node.next = self.head\n self.head = new_node", "def insert(self, value):\n node = Node(value)\n\n if self.head is not None:\n node.next = self.head\n self.head = node", "def insert(self, key: str, value: object) -> None:\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1", "def insert(self, key: str, value: object) -> None:\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1", "def insert(self, value):\n\n node = Node(value)\n node.next = self.head\n self.head = node", "def insert(self, value):\n\n node = Node(value)\n node.next = self.head\n self.head = node", "def insert(self, new_element, position):\n current = self.head\n index = 1\n \n if position == 1:\n current.next = new_element\n \n if position > 1:\n while index!= position - 1:\n current = current.next\n index += 1\n new_element.next = current.next\n current.next = new_element", "def insert(self, n, new_node):\n curr_node = self.head\n \n i = 0\n while i < n:\n if curr_node.next is None:\n raise IndexError(\"list is shorter than n\")\n curr_node = curr_node.next\n i += 1\n \n new_node.next = curr_node.next\n curr_node.next = new_node\n \n return None", "def insert_after(self,node,new_node):\n new_node.next = node.next\n node.next = new_node", "def insert_after(self, prev_node, new_data):\n if prev_node is None:\n print(\"Node is not in Linked List\")\n return\n \n # create a new node to put new data.\n new_node = Node(new_data)\n new_node.next = prev_node.next\n prev_node.next = new_node", "def insert_append(self, data):\n node = Node(data)\n if self.is_empty():\n self.head = node\n else:\n cur = self.head\n while cur.next != None:\n cur = cur.next\n cur.next = node\n node.prev = cur", "def addNode(self, new_value): # Class O(n)\r\n if type(new_value) is not int: raise ValueError(\"Please, insert an integer\")\r\n h = self.head\r\n while 'next' in dir(h.next):\r\n h = h.next\r\n else:\r\n h.next = Node(new_value)", "def insert(self, data, index):\n if index == 0:\n self.prepend(data)\n return\n\n current_index = 0\n current = self.head\n previous = None\n\n while current or previous:\n if current_index == index:\n new_node = Node(data)\n new_node.next = current\n previous.next = new_node\n break\n\n previous = current\n current = current.next\n current_index += 1", "def insert_after(node, new_node):\n new_node.next = node.next\n node.next = new_node", "def insert_node(self, data):\n\t\tif self.root is 
None:\n\t\t\tself.root = Node(data)\n\t\telse:\n\t\t\tcurrent_node = self.root\n\t\t\twhile current_node.next is not None:\n\t\t\t\tcurrent_node = current_node.next\n\t\t\tcurrent_node.next = Node(data, current_node)", "def push(self, new_node):\n \n if self.head is None:\n self.head = new_node\n return None\n \n new_node.next = self.head\n self.head = new_node", "def insert(self, new_element, position):\n count=1\n current = self.head\n if position == 1:\n new_element.next = self.head\n self.head = new_element\n while current:\n if count+1 == position:\n new_element.next =current.next\n current.next = new_element\n return\n else:\n count += 1\n current = current.next\n # break\n pass", "def insert(self, new_element, position):\n current = self.head\n count = 1\n \n if position == 1:\n new_element.next = current\n self.head = new_element\n # elif not(isinstance(self.get_pos(pos), str)): # use: type(self.get_pos(pos)) == str\n else:\n while count < position-1:\n current = current.next\n count += 1\n new_element.next = current.next\n current.next = new_element", "def insert(self,x,pos):\n new = ListNode()\n new.value = x\n new.next = pos.next\n pos.next = new", "def insert(self,value):\n try:\n new_node=Node(value)\n if self.head == None:\n self.head=new_node\n else:\n current=self.head\n while current.next:\n current=current.next\n current.next=new_node\n print( new_node.value)\n return( new_node.value)\n except Exception as error:\n print (f\"There is error in __init__ of LinkedList, the error {error}\")", "def insert_start(self, data):\n\n if self.head is None:\n self.head = ListNode(data)\n else:\n temp = self.head\n self.head = ListNode(data)\n self.head.next = temp", "def insert_head(self, data):\n node = Node(data)\n if self.is_empty():\n self.head = node\n else:\n node.next = self.head\n self.head.prev = node\n self.head = node", "def insert(self, position, data):\n\n node = Node(data)\n traverse = self.head\n\n for i in range(0, position - 1):\n traverse = traverse.next\n temp = traverse.next\n traverse.next = node\n node.next = temp", "def addNode(self, new_data):\r\n curr = self.head\r\n\r\n # Add new Node\r\n if curr is None:\r\n n = Node(new_data) \r\n self.head = n\r\n return\r\n \r\n # Sort Nodes \r\n if curr.data > new_data:\r\n n = Node(new_data) \r\n n.next = curr\r\n self.head = n\r\n return\r\n\r\n while curr.next is not None:\r\n if curr.next.data > new_data:\r\n break\r\n curr = curr.next\r\n n = Node(new_data) \r\n n.next = curr.next\r\n curr.next = n\r\n return", "def insert_before(self, node_to_insert_before, data):\n node = Node(data)\n temp = self.head\n if temp.data == node_to_insert_before:\n node.next = temp\n self.head = node\n return\n while temp.next:\n if temp.next.data == node_to_insert_before:\n break\n temp = temp.next\n if not temp.next:\n print('Item doesn\\t exist')\n return\n node.next = temp.next\n temp.next = node", "def insert(self, index, item):\n \n # Create a new node\n new_code = Node(item)\n \n # Go to node (index - 1)\n curr = self.first\n for i in range(index - 1):\n curr = curr.next\n \n old_next_node = curr.next \n # Update curr's next attribute\n curr.next = new_node\n \n # Update new node's next attribute\n new_node.next = old_next_node", "def insert(self, data, index):\n if index == 0:\n self.add(data)\n\n if index > 0:\n new = Node(data)\n position = index # Cada que se llama a current = current.next_node, se decrementa el valor de position en 1, cuando el valor sea cero, se ha llegado al nodo que está actualmente en la posición que 
queremos insertar el nuevo valor\n current = self.head\n\n while position > 1:\n current = current.next_node\n position -= 1\n \n prev_node = current\n next_node = current.next_node\n\n prev_node.next_node = new\n new.next_node = next_node", "def insert(self, new_element, position):\n current = self.head\n count = 1\n if position > 1:\n while ((current)and (count < position)):\n if(count == position-1):\n\n new_element.next=current.next\n current.next = new_element\n break\n #print(\"count\",count)\n current = current.next\n count = count + 1\n elif position == 1:\n new_element.next = self.head\n self.head = new_element\n\n pass", "def insert(self, index, data):\n\n n = Node(data)\n\n if self.empty() and index != 0:\n print(\"Linked List is Empty hence value cannot be added to index: \", index)\n return\n\n size = self.size()\n\n if index > size:\n print(\"Size of the Linked List is less than the index\")\n return\n\n if index is size:\n return self.push(data)\n\n idx = 0\n h = self.head\n previous = self.head\n while h.next is not None:\n if idx is index:\n if previous is not h:\n previous.next = n\n n.next = h\n else:\n self.head = n\n self.head.next = h\n h = n\n return\n idx += 1\n previous = h\n h = h.next", "def insert(self, value):\n old_head = self.head\n self.head = Node(value, old_head)\n if self.count > 0: # if any Nodes: set tail previous to current Node\n old_head.next = self.head\n else: # adding to an empty, than define front\n self.tail = self.head\n self.count += 1", "def push_node(self, node):\n n = node\n if self.empty():\n self.head = n\n return\n\n l = self.head\n while l.next is not None:\n l = l.next\n l.next = n\n return", "def add_node(self, node):\n temp = self.head.post\n self.head.post = node\n node.pre = self.head\n node.post = temp\n temp.pre = node", "def insert(self, val):\n inserted_node = DblNode(val, self.head)\n if not self.head:\n self.head = inserted_node\n self.tail = self.head\n self.head.previous_node = inserted_node\n self.head = inserted_node", "def insert_before_item(self, x, data):\n if self.head is None:\n raise ValueError(\"No elements in list\")\n if x == self.head.data:\n new_node = SingleNode(data)\n new_node.next = self.head\n self.head = new_node\n return\n n = self.head\n while n.next is not None:\n if n.next.data == x:\n break\n n = n.next\n if n.next is None:\n print(\"Item is not in the list\")\n else:\n new_node = SingleNode(data)\n new_node.next = n.next\n n.next = new_node", "def insert(self, pos, element):\n if pos <= 0:\n self.add(element)\n elif pos >= self.length():\n self.append(element)\n else:\n node = Node(element)\n cursor = self.head\n for i in range(pos-1):\n cursor = cursor.next\n node.next = cursor.next\n node.prev = cursor\n cursor.next.prev = node\n cursor.next = node", "def insert(self, pos, data):\n assert pos >= 0\n if pos >= self.size(): # todo: support to insert node in end of the list\n raise Exception(\"pos:%d is out of index:%d\" % (pos, self.size()-1))\n\n last = None\n current = self.head\n count = -1\n while current is not None:\n count += 1\n if count == pos:\n node = Node(data)\n\n if last is None:\n node.next = self.head\n self.head = node\n else:\n node.next = current\n last.next = node\n\n return\n\n last = current\n current = current.next", "def add_before(self, data, new_node):\n if not self.head:\n raise Exception(\"List is empty\")\n if self.head.data == data:\n return self.add_first(new_node)\n prev_node = self.head\n for node in self:\n if node.data == data:\n prev_node.next = new_node\n 
new_node.next = node\n return\n prev_node = node\n raise Exception(\"Node with data '{}' not found\".format(data))", "def insert_after(self, node_to_insert_after, data):\n node = Node(data)\n temp = self.head\n while temp:\n if temp.data == node_to_insert_after:\n break\n temp = temp.next\n if not temp:\n print('Item does not exist')\n return\n node.next = temp.next\n temp.next = node", "def insert(self, pos, item):\n \n if pos == 0:\n self.add(item)\n \n elif pos >= self.length():\n self.append(item)\n \n else:\n previous = None\n current = self.head\n \n for _ in range(pos):\n previous = current\n current = current.get_next()\n \n n = Node(item)\n previous.set_next(n)\n n.set_next(current)", "def insert(self, value, pos):\r\n\r\n if self.head is None:\r\n self.head = Node(value)\r\n return\r\n\r\n if pos == 0:\r\n self.prepend(value)\r\n return\r\n\r\n index = 0\r\n node = self.head\r\n while node.next and index <= pos:\r\n if (pos - 1) == index:\r\n new_node = Node(value)\r\n new_node.next = node.next\r\n node.next = new_node\r\n return\r\n\r\n index += 1\r\n node = node.next\r\n else:\r\n self.append(value)", "def add_node(self, node):", "def insert_at_index(self, data, index):\n new_node = SingleNode(data)\n if index < 1:\n raise IndexError(\"Index out of bonds\")\n elif index == 1:\n new_node.next = self.head\n self.head = new_node\n else:\n temp = self.head\n for i in range(1, index-1):\n if temp is not None:\n temp = temp.next\n if temp is not None:\n new_node.next = temp.next\n temp.next = new_node\n else:\n print(\"The previous node is None\")", "def push(self, new_data):\n new_node = Node(new_data)\n new_node.next = self.head\n self.head = new_node", "def append_node(self, new_data):\n\n #create a new node and put new data.\n new_node = Node(new_data)\n\n if self.head is None:\n self.head = new_node\n return\n\n end = self.head\n while end.next:\n end = end.next\n\n end.next = new_node", "def add(self, item):\n \n n = Node(item)\n n.set_next(self.head)\n self.head = n", "def insert_after_item(self, x, data):\n n = self.head\n while n is not None:\n if n.data == x:\n break\n n = n.next\n if n is None:\n raise Exception(\"Item not in list\")\n else:\n new_node = SingleNode(data)\n new_node.next = n.next\n n.next = new_node", "def push(self, new_data):\n new_node = Node(new_data)\n new_node.next = self.head\n self.head = new_node", "def add(self, item):\n node = Node(item)\n node.next = self.head\n self.head = node", "def insert(self, index, value):\n # check the validity of index\n if index < 0 or index > self.n: # larger than no. 
of items\n print(\"Index Error; please input valid index\")\n return\n # if index==0, same as push_front\n if index==0:\n self.push_front(value)\n return\n # else,\n new_node = Node(value)\n temp_node = self.head\n for _ in range(index-1):\n temp_node = temp_node.next # traverse the list\n new_node.next = temp_node.next # temp_node is index-1 node\n temp_node.next = new_node\n self.n += 1", "def insert(self, index, item):\n if 1 <= index <= self.count + 1:\n if self.count == 0: #laat item naar zichzelf wijzen, zodat als er een tweede item komt er een circulaire ketting gevormd kan worden\n self.head.next = item\n item.next = item\n if index == 1: #speciaal geval waar de pointer van de head naar het nieuwe item moet wijzen\n firstItem = self.head.next\n item.next = firstItem.next\n firstItem.next = item\n self.head.next = item #head moet naar het nieuwe item wijzen\n else:\n prev = self.head\n for teller in range(1, index): #zoek de node die net op plaats index-1 staat\n prev = prev.next\n item.next = prev.next\n prev.next = item\n self.count += 1\n return True\n else:\n return False", "def add_after(self, data, new_node):\n if not self.head:\n raise Exception(\"List is empty\")\n for node in self:\n if node.data == data:\n new_node.next = node.next\n node.next = new_node\n return\n raise Exception(\"Node with data '{}' not found\".format(data))", "def insert_at_position(self, position, data):\n node = Node(data)\n if not self.head:\n self.head = node\n return\n if position == 1:\n node.next = self.head\n self.head = node\n return\n temp = self.head\n for _ in range(1, position - 1):\n if not temp:\n print('Index out of bound')\n return\n temp = temp.next\n node.next = temp.next\n temp.next = node", "def test_insert_node(self):\r\n myObj = DLinkedList()\r\n myObj.append(120)\r\n myObj.append(100)\r\n self.assertEqual(myObj.insert_node(Node(1000), myObj.head), [120, 1000, 100])", "def insert(self, course):\n new_node = Node(course)\n\n if self.head is None or self.head.data.number() >= new_node.data.number():\n new_node.next = self.head\n self.head = new_node\n self._size += 1\n return\n\n cur_node = self.head\n while cur_node.next and cur_node.next.data.number() < new_node.data.number():\n cur_node = cur_node.next\n new_node.next = cur_node.next\n cur_node.next = new_node\n self._size += 1", "def insert(self, i, node_value):\n node = Node(node_value, None)\n if i < 0 or i > self.num_elements:\n raise IndexError(\"Insert index is out of range.\")\n if i == 0:\n node.next = self.head\n self.head = node\n else:\n current_node = self.head\n for j in xrange(i - 1):\n current_node = current_node.next\n node.next = current_node.next\n current_node.next = node\n self.num_elements += 1", "def add(self, item):\n # must keep two pointers marching\n # in synch down the list.\n current = self._head\n previous = None\n while current != None:\n if current.getData() > item:\n # we’ve reached the insertion spot\n break\n else:\n # otherwise, advance both pointers\n previous = current\n current = current.getNext()\n temp = Node(item)\n if previous == None:\n # insert at the start of the list\n temp.setNext(self._head)\n self._head = temp\n else:\n temp.setNext(current)\n previous.setNext(temp)", "def insert(self, value):\n if self.is_empty(): # no element in linked list\n self._tail = self._head = self.Node(value, None)\n else:\n if value <= self._head.value:\n self._head = self.Node(value, self._head)\n elif value >= self._tail.value:\n self._tail.next = self.Node(value, None)\n self._tail = 
self._tail.next\n else:\n current = self._head\n while current.next.value <= value:\n current = current.next\n current.next = self.Node(value, current.next)\n self._size += 1", "def insert(self, value, ident):\n print(\"Insert\", value, ident)\n found_on_next_node = self._pop_node(ident)\n # if found_on_next_node:\n # print(\"Found node:\"\n # ,found_on_next_node.value\n # ,found_on_next_node.ident\n # )\n self._insert_node(value, ident)", "def insert(self,node,index=None):\n if index==None: index=self.size\n if index>self.size: raise IndexError(\"Index out of range.\")\n if not isinstance(node,DoublyLinkedNode): node = DoublyLinkedNode(node)\n if self.size==0: self.reference, node.previous, node.next = 3*[node]\n elif self.size==1: self.reference.previous, self.reference.next, node.previous, node.next = 2*[node] + 2*[self.reference]\n else:\n previous_pointer, pointer = self.reference.previous, self.reference\n for i in range(index): previous_pointer, pointer = pointer, pointer.next\n previous_pointer.next, node.previous, node.next, pointer.previous = node, previous_pointer, pointer, node\n if index==0: self.reference=node\n self.size+=1", "def insert_node_after_item(self, item, new_node):\n curr_node = self.head\n \n while curr_node is not None:\n if curr_node.data == item:\n new_node.next = curr_node.next\n curr_node.next = new_node\n return None\n elif curr_node.data != item and curr_node.next is None:\n raise ItemNotFoundError(\"please check your input and try again\")\n curr_node = curr_node.next", "def insert(self, index, value):\n if self.head is None:\n self.append(value)\n return\n \n from_head = True if index >= 0 else False \n if from_head: \n node = self.head\n steps = index \n else:\n node = self.tail \n steps = abs(index) -1 \n while steps > 0 and node is not None:\n node = node.next_node if from_head else node.prev_node \n steps -= 1 \n \n if node is None:\n if from_head: \n self.append(value)\n return\n else:\n self.push_front(value)\n return\n if node is self.head:\n self.push_front(value)\n return\n else:\n new_node = DLLNode(value)\n new_node.next_node = node\n new_node.prev_node = node.prev_node\n node.prev_node.next_node = new_node\n node.prev_node = new_node \n return", "def insert(self, n, pos):\n if pos == 0:\n self.cons(n)\n else:\n prev = self.index(pos-1)\n next = prev.next\n prev.next = n\n n.next = next\n self.len += 1", "def sorted_insert(self, value):\n if self.__head is None:\n self.__head = Node(value, None)\n elif value < self.__head.data:\n self.__head = Node(value, self.__head)\n else:\n n = self.__head\n while n.next_node is not None and n.next_node.data <= value:\n n = n.next_node\n new_node = Node(value, n.next_node)\n n.next_node = new_node", "def sorted_insert(self, value):\n new = Node(value)\n if self.__head is None:\n self.__head = new\n return\n\n cur = self.__head\n if new.data < cur.data:\n new.next_node = self.__head\n self.__head = new\n return\n\n while (cur.next_node is not None) and (new.data > cur.next_node.data):\n cur = cur.next_node\n\n new.next_node = cur.next_node\n cur.next_node = new\n return", "def insert(self, article, count):\n new_node = Node(article, count)\n if self.head is None:\n self.head = new_node\n return \n latest = self.head \n while latest.next is not None:\n latest = latest.next \n latest.next = new_node", "def insertTail(head, data):\n # Using the iterative solution. 
Recursive also exists, but I don't\n # think it offers any benifit in space/time complexity\n if head is None: # First the initial/null case:\n return ListNode(val=data)\n node = head # Then the general case - scroll tot the end of the list\n while node.next is not None:\n node = node.next\n node.next = ListNode(val=data) # tack on the new value\n return head", "def append(self, new_node):\n \n if self.head is None:\n self.head = new_node\n return None\n\n curr_node = self.head\n while curr_node.next is not None:\n curr_node = curr_node.next\n curr_node.next = new_node", "def insert_end(self, data):\n\n if self.head is None:\n self.head = ListNode(data)\n else:\n temp = self.head\n while temp.next is not None:\n temp = temp.next\n temp.next = ListNode(data)", "def prepend(self, data):\n new_node = Node(data)\n\n new_node.next = self.head\n self.head = new_node", "def addNodeBefore(self, new_value, before_node): # Class O(n)\r\n if not isinstance(new_value, Node):\r\n if new_value % 1 != 0: raise ValueError(\"Please, insert an integer\")\r\n if before_node > self.length(): raise ValueError(\"Invalid position\")\r\n if before_node == 1:\r\n self.head = Node(new_value, self.head)\r\n else:\r\n self.addNodeAfter(new_value, before_node - 1)", "def add_node(self, state, other):\n\t\tnew_node = Node()\n\t\tnew_node.state = state\n\t\tnew_node.info = other\n\n\t\tif self.head == None:\n\t\t\tself.current = new_node\n\t\t\tself.head = new_node\n\t\telse:\n\t\t\tself.current.next = new_node\n\t\t\tself.current = self.current.next", "def add_node(self, node):\n self.nodes.append(node)", "def add_to_head(self, value):\n node = Node(value)\n if self.head is not None:\n node.set_next(self.head)\n\n self.head = node", "def push(self, node):\n self.prepend(node)", "def _add_node(self, node):\n node.prev = self.head\n node.next = self.head.next\n\n self.head.next.prev = node\n self.head.next = node", "def test_insert_head_one_element_list_2(test_linkedlist):\n test_linkedlist.insert_head('A')\n test_linkedlist.insert_head('B')\n assert test_linkedlist.head.next.data == 'A'", "def insertBefore(self,p,e):\r\n \r\n if p == self.head: #if p is the head node\r\n e.next = p #link e to p\r\n p.prev = e #link p to e\r\n self.head = e #set e to be the 'new' head node\r\n \r\n else: \r\n e.prev = p.prev #link e to prev node of p\r\n e.next = p #link e to p\r\n (p.prev).next = e #link prev node of p to e\r\n p.prev = e #link prev node to e\r\n \r\n self.size +=1 #increase length of linked list by 1\r", "def addAtHead(self, val):\n node = ListNode(val)\n if self.head == None:\n self.head = node\n else:\n node.next = self.head\n self.head = node", "def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = none\n else:\n self.right.insert(node)", "def insert(self, node, after):\n if not isinstance(after, Node):\n # If the after parameter is not a Node raise an error.\n raise TypeError(\"After must be a Node not a %s\" % (type(after)))\n\n if not isinstance(node, Node):\n # If the node parameter is not a Node then update it\n # to refer to one.\n node = Node(node)\n\n node.next = after.next\n after.next = node", "def add_node(self, new_node: 'GraphNode'):\n self.operator.add_node(new_node)", "def sorted_insert(self, value):\n if self.__head is None or self.__head.data > value:\n new_node = Node(value)\n if self.__head is not None:\n 
new_node.next_node = self.__head\n self.__head = new_node\n else:\n runner = self.__head\n while runner.next_node and value > runner.next_node.data:\n runner = runner.next_node\n runner.next_node = Node(value, runner.next_node)", "def add_first(self, node_to_add):\n node_to_add.next = self.head\n self.head = node_to_add", "def add_to_head(self, value):\n\n new_node = ListNode(value)\n if self.size == 0:\n self.head = new_node\n self.tail = new_node\n\n else:\n new_node.next = self.head\n self.head.prev = new_node\n new_node.next = self.head\n self.head = new_node\n\n # increments the size attribute after adding node to list\n self.size += 1", "def addAtHead(self, val):\n new_node = ListNode(val)\n new_node.next = self.head\n self.head = new_node\n self.length += 1", "def prepend(self, data):\n new_node = Node(data)\n if self.head is None:\n self.head = new_node\n return\n new_node.next = self.head\n self.head = new_node", "def add_node(self, node):\n self.nodes.add(node)", "def sorted_insert(self, value):\n\n new = Node(value)\n if self.__head is None:\n self.__head = new\n elif self.__head.data > value:\n new.next_node = self.__head\n self.__head = new\n else:\n temp = self.__head\n while (temp.next_node is not None and temp.next_node.data < value):\n temp = temp.next_node\n new.next_node = temp.next_node\n temp.next_node = new", "def add_node(self, node):\n self.nodes[node.name] = node\n self.dirty = True", "def insert_beg(self,node):\n if self.head == node:\n raise CircularReference(\n 'Head and next can not be the same ref'\n )\n\n node.next = self.head\n self.head = node", "def register_node(self, node):\n self.nodes.add(node)", "def insert(self, k: int, v: int) -> None:\n i = k % self.capacity\n if not self.data[i]:\n self.data[i] = ListNode(k, v)\n else:\n cur = self.data[i]\n while True:\n if cur.pair[0] == k:\n cur.pair = (k, v)\n return\n if not cur.next:\n break\n cur = cur.next\n cur.next = ListNode(k, v)", "def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = node\n else:\n self.right.insert(node)", "def insert_node(self, node):\n if self._is_node_reserved(node):\n return False\n\n # Put node in map\n self._node_map[node.get_id()] = node\n return True" ]
[ "0.8028355", "0.8022881", "0.78825825", "0.7828899", "0.78155625", "0.78046495", "0.7687818", "0.76444274", "0.76059026", "0.76059026", "0.75250113", "0.75250113", "0.7509364", "0.7460348", "0.74056655", "0.73841995", "0.73401856", "0.7332098", "0.7327415", "0.7317284", "0.7280649", "0.7271094", "0.7267072", "0.7263452", "0.72550464", "0.72390795", "0.7237325", "0.7214157", "0.71992767", "0.7198363", "0.71981674", "0.7198044", "0.7189912", "0.718969", "0.7166173", "0.7157532", "0.71358013", "0.7127354", "0.7119859", "0.70677954", "0.7040288", "0.70232284", "0.70228165", "0.7012762", "0.7008847", "0.69933546", "0.6986191", "0.6963032", "0.695919", "0.6956335", "0.69527674", "0.6940256", "0.69303614", "0.6890455", "0.6885922", "0.68775773", "0.6877433", "0.6865567", "0.6838925", "0.68324864", "0.68242913", "0.6823725", "0.6814104", "0.6777253", "0.6772596", "0.6770772", "0.67615914", "0.6758277", "0.67529744", "0.67471004", "0.67317754", "0.6729531", "0.67229784", "0.67158705", "0.668854", "0.6685188", "0.66822886", "0.6660613", "0.6656647", "0.6651936", "0.6643146", "0.6605325", "0.66014034", "0.65993834", "0.65941864", "0.65836364", "0.658064", "0.6578418", "0.6564612", "0.6559557", "0.65589267", "0.6554745", "0.6552544", "0.65492755", "0.6543763", "0.65414566", "0.65354055", "0.65318257", "0.65238905", "0.65215236" ]
0.6689157
74
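The record that ends here carries negative passages that largely revolve around inserting a node at a given position in a singly linked list. A minimal, self-contained sketch of that insert-at-index pattern (the Node class and the insert_at helper are assumptions for illustration, not strings taken from the dataset):

class Node:
    def __init__(self, data, next_node=None):
        self.data = data
        self.next = next_node


def insert_at(head, index, data):
    # Return the (possibly new) head after inserting `data` at position `index`.
    if index == 0:
        return Node(data, head)
    node = head
    for _ in range(index - 1):
        if node is None:
            raise IndexError("index out of range")
        node = node.next
    if node is None:
        raise IndexError("index out of range")
    node.next = Node(data, node.next)
    return head


head = Node(1, Node(3))
insert_at(head, 1, 2)  # list becomes 1 -> 2 -> 3

Returning the head from the helper keeps the index == 0 case uniform with insertion further down the chain.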
Print the linked list
def list_print(self):
    node = self.cur_node  # cant point to ll!
    while node:
        print(node.data)
        node = node.next
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_list(self) -> None:\n cur_node = self.head\n while cur_node:\n print(cur_node.data)\n cur_node = cur_node.next", "def print_list(self):\n\n current = self.head\n\n while current is not None:\n print current.data\n current = current.next", "def travel_print(self):\n if self.is_empty():\n print(\"Linked list's length is 0\")\n else:\n node = self.head\n print(\"head -->\", node.data, end=' ')\n while node.next:\n node = node.next\n print(\"-->\", node.data, end=' ')\n print(\" \")", "def printList(self): \r\n aux = self.head \r\n while(aux): \r\n print(aux.data , end = ' ') \r\n aux = aux.next", "def print(self):\n temp = self.head\n while temp.next!=None:\n temp = temp.next\n \n print(temp.value, end= ' ')\n print(\"\")", "def print_list(self):\n p = self.head\n i = 0\n\n while i < self.size():\n print(p.data)\n i += 1\n p = p.next_node", "def show(self):\n if self.empty():\n return \"Linked List is Empty\"\n\n l = self.head\n while l is not None:\n print(l.data, end=\" ----> \")\n l = l.next\n print()\n return", "def print(self):\n current = self.head.next\n for i in range(0,self.count):\n print(current.item)\n current = current.next", "def show(self):\n\n traverse = self.head\n\n if self.head == None:\n print(\"Linked List is empty\")\n return\n\n while traverse.next != None:\n print(traverse.data)\n traverse = traverse.next\n\n print(traverse.data)", "def showListFromNode(self, node):\n if self.empty():\n return \"Linked List is Empty\"\n\n l = node\n while l is not None:\n print(l.data, end=\" ----> \")\n l = l.next\n print()\n return", "def display(self):\n printval = self.head \n while printval.next is not None:\n print (printval.__repr__(), end=\"\")\n printval = printval.next\n else:\n print (printval.__repr__())", "def show(self):\n traverse = self.head\n while traverse.next != None:\n print(traverse.data)\n traverse = traverse.next\n print(traverse.data)", "def print_list(self):\r\n head = self\r\n tail = self.__next # go to my next node\r\n if tail is not None: # as long as the end of the list has not been reached\r\n print(head, end=\" \") # print my head\r\n tail.print_list() # recursively print remainder of the list\r\n else: # print the last element\r\n print(head, end=\" \")", "def display(self):\n\n current = self.head\n\n while current is not None:\n print(current.data)\n\n current = current.next", "def show(self):\n current = self._head\n print(current._data)\n while current._next:\n current = current._next\n print(current._data)", "def display(self):\n\t\tpointer = self.head\n\t\twhile pointer != None:\n\t\t\tprint pointer.state + \"\\t\" + pointer.info\t\n\t\t\tpointer = pointer.next", "def print_linked_list(head):\n while head != None:\n print head.val, \n head = head.sibling\n print", "def show(self):\n\n if self.front == None:\n print(\"Linked List is empty\")\n return\n\n while self.front.next != None:\n print(self.front.data)\n self.front = self.front.next\n\n print(self.front.data)", "def print_list(self):\r\n pass", "def print_list(self):\n node = self.head\n\n string = '['\n while node:\n if node.next:\n string += str(node.value) + ' -> '\n else:\n string += str(node.value)\n node = node.next\n string += ']'\n return string", "def printList(head) :\n \n # Iterate through the list, printing all values\n ptr = head\n while ptr :\n print(ptr.data, end=\" \")\n ptr = ptr.next\n print()", "def printList(head):\n print(deconstructList(head))", "def dump(self, mark='----'):\n print(mark)\n node = self.head\n while node:\n print(node, \" \", end='')\n node 
= node.next\n print()", "def print_list(head=None):\n print head\n if(head.next):\n print_list(head.next)", "def print(self):\n output_string = \"Printing List of Nodes.\\n\"\n print(\"Printing List of Nodes\")\n for node in self.nodes:\n if node:\n output_string += str(node)\n node.print()\n return output_string", "def dump(self) -> NoReturn:\n index = self._head\n while index:\n print(index.data, end=\" \")\n index = index.next", "def displayNode(self):\n for x in self.__node:\n print(x)", "def display(self):\r\n elems = [] #create a list of elements we've seen\r\n current_node = self.head\r\n while current_node.next!=None:\r\n current_node = current_node.next\r\n elems.append(current_node.data)\r\n print(elems)", "def printLR(headNode):\n node = headNode\n \n while node is not None:\n print(node.item, end = \"\\t\")\n node = node.rightL\n\n print(\"end of linked list\")", "def print(self, index):\n count=0\n start = self.head\n while start:\n if count==index:\n print(count, ' : ', start.getMember())\n break\n start=start.getLink()\n count+=1", "def print_list_of_nodes(self):\n\n for node in self.list_empty_nodes:\n print(\"--------------------------\")\n print(\"Node num : \"+str(node.num))\n print(\"Node distance from start point : \"+str(node.distance_from_start_point))\n if node.pere is None:\n print(\"Pas de père\")\n else:\n print(\"Num du père : \"+str(node.pere.num))", "def display(self):\n current = self.head\n result = \"(\"\n while current is not None:\n if isinstance(current.data, str):\n result = result + \"'\" + current.data + \"'\"\n else:\n result = result + str(current.data)\n if current.next_node is not None:\n result += \", \"\n current = current.next_node\n result += ')'\n print(result)\n return result", "def print_node(self):\n print('{:15}{:3}'.format(self.data, self.count))", "def __str__(self) -> str:\n content = ''\n if self.head is not None:\n content = str(self.head)\n cur = self.head.next\n while cur is not None:\n content += ' -> ' + str(cur)\n cur = cur.next\n return 'SLL [' + content + ']'", "def __str__(self) -> str:\n content = ''\n if self.head is not None:\n content = str(self.head)\n cur = self.head.next\n while cur is not None:\n content += ' -> ' + str(cur)\n cur = cur.next\n return 'SLL [' + content + ']'", "def traverse(self):\n current = self.head\n while current is not None:\n print current.value\n current = current.next", "def display(self, end=\"->\"):\n curr = self.head\n result = ''\n while curr != None:\n result += str(curr.data) + end\n curr = curr.link\n result = result.strip(end)\n print(result)", "def __str__(self):\n\n return self._fold_loop(lambda x, y: y + \"%s -> \" % x, \"LinkedList [\",\\\n self._head) + \"None]\"", "def print_child_list(head):\n while head != None:\n print head.val, \n head = head.child\n print", "def print_list(self):\n self.print_avec_separateur(\" \")", "def __str__(self):\n\n list_str = ''\n current = self.head\n while current:\n # print(current, \"current\")\n list_str += str(current.value ) + ', '\n current = current.next\n return list_str[:-2]", "def display(self):\n container = []\n current = self.head\n while current is not None:\n container.append(current.val)\n current = current.next\n print(tuple(container))\n return tuple(container)", "def print_num(head):\n ptr = head\n digits = []\n while ptr:\n digits.append(ptr.data)\n ptr = ptr.next_node\n print(\"\".join(map(str, reversed(digits))))", "def __str__(self):\n reprStr = ''\n currNode = self.head\n while currNode:\n reprStr = reprStr + 
str(currNode.count) + ' ' + str(currNode.data) + '\\n'\n currNode = currNode.next\n return reprStr", "def display(self):\n node = self.head\n display_this = []\n while node:\n display_this.append(node.data)\n node = node.next\n return str(display_this).replace(\"[\", \"(\").replace(\"]\", \")\")", "def displayForward(self):\n if self.is_empty():\n print(\"Nothing to print...\")\n return\n \n walk = self._start._next # starting from the head\n \n print(self._start._element, end=' ') # print the start element first\n while walk is not self._start:\n print(walk._element, end=' ')\n walk = walk._next\n \n print() # for newline", "def show(self):\n traverse = self.head\n\n if self.top <= -1:\n print(\" Stack Underflow\")\n return\n if traverse == None:\n print(\"Stack is empty\")\n return\n\n while traverse.next != None:\n print(traverse.data)\n traverse = traverse.next\n print(traverse.data)", "def __str__(self):\n out = '['\n if self.head != None:\n cur = self.head\n out = out + str(self.head)\n cur = cur.next\n while cur != None:\n out = out + ' -> ' + str(cur)\n cur = cur.next\n out = out + ']'\n return out", "def __str__(self):\n temp = self.__head\n ss = []\n while temp is not None:\n ss.append(str(temp.data))\n temp = temp.next_node\n return ('\\n'.join(ss))", "def __str__(self):\n\n list_str = ''\n current = self.head\n while current:\n # print(current, \"current\")\n list_str += str(current.value ) + ', '\n current = current.next\n \n return list_str[:-2]", "def printlist(self):\n current_element = self.head\n items = []\n while current_element:\n items.append(current_element.value)\n current_element = current_element.next\n return items", "def PrintAdjacencyList(self):\n print(\"\\nAdjacency list:\")\n\n for node in self.nodes:\n node.PrintNeighbours()", "def __repr__(self):\n\n return \"LinkedList created\"", "def __str__(self):\n\n result = \"\"\n\n temp = self.head\n while temp is not None:\n result += str(temp.data) + \" -> \"\n temp = temp.next\n\n return result[0:-4]", "def print_list(l):\n for elem in l:\n print(elem)", "def to_string(self):\n try:\n items = \" \"\n current = self.head\n while current:\n items += f\"{ {current.value} }->\"\n current=current.next\n items+=\"NULL\"\n print (items)\n return items\n # items.append(current.value)\n # current = current.next\n # print(''.join(f\"{ {k[1]} }->\" for k in enumerate(items))+'NULL')\n # return(''.join(f\"{ {k[1]} }->\" for k in enumerate(items))+'NULL')\n except Exception as error:\n print (f\"There is error in __init__ of LinkedList, the error {error}\")", "def print_backward(self):\n head = self\n tail = self.__next # go to my next node\n if tail is not None: # as long as the end of the list has not been reached\n tail.print_backward() # recursively print remainder of the list backwards\n print(head, end=\" \") # print my head", "def printall():\n print listAll()", "def __str__(self):\n temp = \"head\"\n temp_node = self.head\n while temp_node is not None:\n temp += f' -> {temp_node.val}'\n temp_node = temp_node.next\n temp += f'-> None'\n return temp", "def __repr__(self):\n return \"{}\".format(self._head)", "def print_tree(self):\n\t\tprint(self.__print_tree('', True, ''))", "def printNode(self):\n print(\"Instance ID :\" + str(self.instanceID))\n print(\"Counter value :\" + str(self.counter))\n print(\"appData :\")\n for i in range(0, len(self.appData)):\n print(self.appData[i])\n print(\"syncDataStructure :\")\n for key, value in self.syncDataStructure.items():\n print(key + \":\")\n print(value)\n print(\"Store 
:\")\n for key, value in self.store.items():\n print(key + \":\")\n value.printStoreRecord()", "def __repr__(self):\n return 'LinkedList({!r})'.format(self.items())", "def __str__(self):\n string = \"\"\n cur = self.__head\n while cur is not None:\n string += str(cur.data)\n cur = cur.next_node\n if cur is not None:\n string += \"\\n\"\n return string", "def print_backward(self):\r\n head = self\r\n tail = self.__next # go to my next node\r\n if tail is not None: # as long as the end of the list has not been reached\r\n tail.print_backward() # recursively print remainder of the list backwards\r\n print(head, end=\" \") # print my head\r", "def __repr__(self):\n\n nodes = []\n current = self.head\n\n while current:\n if current is self.head:\n nodes.append('[Head: %s]' % current.data)\n elif current.next_node is None:\n nodes.append('[Tail: %s]' % current.data)\n else:\n nodes.append('[%s]' % current.data)\n current = current.next_node\n\n return '-> '.join(nodes)", "def __repr__(self):\n return \"LinkedList([{}],{}/{})\".format(self.cur_node, self.cur_pos, self.length)", "def print(self) -> None:\n\n print('')\n print(f\"{self.get_name()}, {self.get_description()}\")\n print('-------------')\n for child in self._children:\n child.print()", "def __str__(self):\n runner = self.__head\n if runner is None:\n return \"\"\n while runner.next_node:\n if runner is not None:\n print(\"{}\".format(runner.data))\n runner = runner.next_node\n return \"{}\".format(runner.data)", "def print_list(lst):\n i = 0\n while i < len(lst):\n print(lst[i])\n i += 1", "def print(self):\n print(\"Repository list: \")\n for repo in self.list:\n print(\"- \" + repo.name)", "def display_list(self, prefix=''):\n parent = '' if self.parent is None else self.parent.id\n children = [] if not self.children else [c.id for c in self.children]\n output = (\n f'{prefix}NODE ID: {self.id}\\n'\n f'{prefix} type: {self.node_type}\\n'\n f'{prefix} label: {self.label}\\tparent node: {parent}\\n'\n f'{prefix} arity: {self.arity}\\tchild node(s): {children}\\n\\n')\n if self.children:\n output += ''.join(child.display_list(prefix=prefix+'\\t')\n for child in self.children)\n return output", "def basic_print(lista):\n for item in lista:\n print(\"{} \\t\\t {}\".format(item[0], item[1]))", "def printTree(self):\n print(printTreeF(self, 0, self))", "def print_list_reverse(head):\n if not head:\n return\n\n top = head\n\n tail = head.next\n print_list_reverse(tail)\n print top", "def print_tree(self):\n print(_Node.__print_tree(self))", "def print(self):\n print(self.pretty_str())", "def show(self):\n\n if self.front == None:\n print(\"Queue is empty\")\n return\n\n while self.front.next != None:\n print(self.front.data)\n self.front = self.front.next\n\n print(self.front.data)", "def my_print(self):\n if self.__size > 0:\n print(\"\\n\" * self.__position[1], end=\"\")\n for i in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.__size)\n else:\n print()", "def display_content(self):\n list = []\n traverse = self.head\n\n if self.head == None:\n # print(\"Linked List is empty\")\n return\n\n while traverse.next != None:\n list.append(traverse.data)\n traverse = traverse.next\n\n list.append(traverse.data)\n return list", "def __str__(self):\n s = \"\"\n current = self.__head\n while current:\n s += str(current.data) + \"\\n\"\n current = current.next_node\n return s[:-1]", "def print(self):\n\n print(self)", "def __str__(self):\n\n string = \"\"\n\n current = self.head\n\n while current is not None:\n 
string += f\"{ {current.value} } -> \"\n current = current.next\n\n string += f\" None \"\n\n return string", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n return s\n\n print pprintStr(self)", "def dft_print(self):\n #print(self.value)\n #if self.left:\n # self.left.dft_print()\n #if self.right:\n # self.right.dft_print()\n stack = []\n stack.append(self)\n while len(stack):\n current = stack.pop()\n print(current.value)\n if current.left:\n stack.append(current.left)\n if current.right:\n stack.append(current.right)", "def pprint(self):\n print(self.pprint_str())", "def __str__(self):\n\n return LinkedList.str_recursive(self.front)", "def printL(L, L_name='List', verbose=True):\n if verbose:\n ('\\n[' + L_name + ']:')\n if verbose:\n for item in list(L):\n print('\\t' + str(item))\n print('[' + L_name + '] length: ' + str(len(L)) + '\\n')", "def test_linked_list_display(new_ll):\n from linked_list import Linked_List\n result = str((\"apple\", 3, \"pear\", 1, \"something\"))\n assert new_ll.display() == result", "def print_list(l):\n print('[' + ', '.join([x.__str__() for x in l]) + ']')", "def __repr__(self):\n nodes = []\n current = self.head\n while current:\n nodes.append(repr(current))\n current = current.next\n\n return '[' + ','.join(nodes) + ']'", "def __str__(self):\n elements = []\n current = self._head\n while current:\n elements.append(str(current.val))\n current = current.next\n return ' -> '.join(elements)", "def print(self):\n for i, v in enumerate(self._adj):\n if v:\n print(\"vertex {0}\".format(i))\n for e in v:\n print(e)\n print()", "def __str__(self) -> str:\n ret = StringBuilder(\"\")\n current = self.head\n while current:\n ret += current.info\n current = current.next\n return str(ret)", "def display(self):\n print(self)", "def show(self):\n\n print(self._walk(self, depth=1))", "def __str__(self):\n string = \"\"\n cur_node = self.head\n while cur_node is not None:\n string += cur_node.data.__str__()\n cur_node = cur_node.next\n return string", "def my_print(self):\n if self.__size == 0:\n print()\n else:\n print(\"\\n\" * self.__position[1], end='')\n for x in range(self.__size):\n print(\" \" * self.__position[0], end='')\n print(\"#\" * self.__size)", "def __repr__(self):\n nodes = []\n curr = self.head\n while curr:\n nodes.append(repr(curr))\n curr = curr.next\n return '[' + ', '.join(nodes) + ']'", "def __repr__(self):\n nodes = []\n curr = self.head\n while curr:\n nodes.append(repr(curr))\n curr = curr.next\n return '[' + ', '.join(nodes) + ']'" ]
[ "0.8673001", "0.86067754", "0.85906494", "0.8569578", "0.85571915", "0.8512224", "0.84492445", "0.8400473", "0.82561886", "0.8178204", "0.8120253", "0.8110577", "0.809658", "0.7893961", "0.7872987", "0.7842627", "0.78114355", "0.77788997", "0.77637863", "0.7760473", "0.77422404", "0.77183205", "0.758904", "0.7575949", "0.74362737", "0.74153066", "0.73767954", "0.73752975", "0.7250266", "0.7212496", "0.6971791", "0.6938455", "0.6930603", "0.69179547", "0.69179547", "0.68953097", "0.68952614", "0.6895041", "0.68853784", "0.68028766", "0.6778831", "0.6757496", "0.6755456", "0.674995", "0.6745256", "0.6722875", "0.6709062", "0.6702823", "0.6696399", "0.66884583", "0.6687873", "0.66702294", "0.6661644", "0.66583806", "0.6657863", "0.66468185", "0.66373545", "0.66332096", "0.66301894", "0.66246426", "0.66244483", "0.6622472", "0.66210157", "0.66126347", "0.66085404", "0.6601988", "0.659168", "0.65843004", "0.65760326", "0.65755576", "0.6569313", "0.65610534", "0.6559638", "0.6530888", "0.65221995", "0.65215796", "0.6515312", "0.65152264", "0.6498614", "0.64944005", "0.64900404", "0.64527595", "0.6445252", "0.6437155", "0.643382", "0.64320785", "0.6429429", "0.64101106", "0.6406318", "0.6400112", "0.639418", "0.6385546", "0.6373634", "0.637068", "0.6369388", "0.6355149", "0.6346118", "0.63438827", "0.6342769", "0.6342769" ]
0.88183177
0
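The positive document above walks the chain through a cur_node attribute that the record itself never defines. A minimal sketch of how that traversal could be exercised, assuming a plain Node class and a small wrapper list (both are assumptions for illustration):

class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self, values):
        # Build the chain back to front so cur_node ends up at the head.
        self.cur_node = None
        for value in reversed(values):
            node = Node(value)
            node.next = self.cur_node
            self.cur_node = node

    def list_print(self):
        node = self.cur_node  # walk a local reference, not the list object itself
        while node:
            print(node.data)
            node = node.next


LinkedList([1, 2, 3]).list_print()  # prints 1, 2 and 3 on separate lines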
Move in the linked list n places, if reached the end start from the start
def move_circular(self, count):
    planned_move = self.cur_pos + count
    if count + planned_move < self.length:
        move_count = count
        self.cur_pos += move_count
        # print("Move less from {} to {}({})".format(
        #     self.cur_pos,
        #     self.cur_pos + move_count,
        #     planned_move
        # ))
    else:
        self.cur_node = self.start_node
        move_count = max(0, (planned_move % (self.length)))
        # print("Move circ from {} to {}({})".format(self.cur_pos, move_count, planned_move))
        self.cur_pos = move_count
    for _ in range(move_count):
        self.cur_node = self.cur_node.next
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(self, n: int) -> \"Linked[T]\":\n out = self\n if n >= 0:\n for _ in range(n):\n out = out.forward\n else:\n for _ in range(-n):\n out = out.backward\n return out", "def move_stack(n, start, end):\n assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, \"Bad start/end\"\n \n if n == 1:\n print_move(start, end) \n else:\n extra_pole = 6 - start - end\n move_stack(n-1, start, extra_pole)\n move_stack(1, start, end)\n move_stack(n-1, extra_pole, end)\n\n #GSI helped, still don't completely understand, need to review more", "def move_stack(n, start, end):\n assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, \"Bad start/end\"\n \"*** YOUR CODE HERE ***\"\n if n == 1:\n print_move(start, end)\n return\n handle = 6 - start - end\n move_stack(n - 1, start, handle)\n print_move(start, end)\n move_stack(n - 1, handle, end)\n return", "def move_stack(n, start, end):\n assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, \"Bad start/end\"\n \"*** YOUR CODE HERE ***\"", "def move_stack(n, start, end):\n assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, \"Bad start/end\"\n \"*** YOUR CODE HERE ***\"", "def _increment_move(move, direction, n):\n # print(move)\n move = list(map(sum, zip(move, direction)))\n #move = (move[0]+direction[0], move[1]+direction[1])\n while all(map(lambda x: 0 <= x < n, move)):\n #while 0<=move[0] and move[0]<n and 0<=move[1] and move[1]<n:\n yield move\n move=list(map(sum,zip(move,direction)))\n #move = (move[0]+direction[0],move[1]+direction[1])", "def _increment_move(move, direction, n):\n # print(move)\n move = list(map(sum, zip(move, direction)))\n #move = (move[0]+direction[0], move[1]+direction[1])\n while all(map(lambda x: 0 <= x < n, move)): \n #while 0<=move[0] and move[0]<n and 0<=move[1] and move[1]<n:\n yield move\n move=list(map(sum,zip(move,direction)))\n #move = (move[0]+direction[0],move[1]+direction[1])", "def move_back(t,n):\n lt(t)\n bk(t, n)\n rt(t)", "def up_to(self, n, l):\n for i in range(self._, n):\n l(i)\n return self", "def move(self):\n self.old_tail = self.body[-1][:] # save old position of last block\n self.head[0] += self.direction[0] # moves head\n self.head[1] += self.direction[1]\n \n self.head[0] = (self.head[0] + self.xMaxSize) % self.xMaxSize\n self.head[1] = (self.head[1] + self.yMaxSize) % self.yMaxSize\n \n if self.head in self.body[1:]: # if snakes hits himself\n self.alive = False\n self.body.insert(0, self.body.pop()) # each block is replace by predecessor\n self.body[0] = self.head[:] # first block is head", "def move(self, amount):\n self.__validate_index(amount)\n for i in range(amount):\n self.__list = self.__list[1:] + [self.__list[0]]\n return self.__list", "def nextend(lst, n):\n nl = []\n for i in range(n):\n nl.extend(lst)\n return nl", "def hanoi(n, source, target, helper):\n if n > 0:\n hanoi(n-1, source, helper, target)\n print(\"move disk from\", source, \"to\", target)\n hanoi(n-1, helper, target, source)", "def lloc(_list, n):\n if n < 0:\n return len(_list[0]) + n\n return n", "def _move(self, direction, count, step_size):\n for _ in range(count):\n if not self.has_hit_edge():\n self.position = min(self.scene.step_count - 1, max(0,\n self.position + direction * step_size))\n self._take_measure()\n self.steps_taken += 1\n self.visited_positions.append(self.position)", "def move(degs, i, j, n):\n if n > 0:\n temp = 3 - i - j\n move(degs, i, temp, n - 1) \n degs[j].append(degs[i].pop(-1))\n print(degs)\n move(degs, temp, j, n - 1)", "def decide_next_move(self):\n pass", "def 
shifter(list):\n #sc1 = \"objects \" #Scaffolding message variables. Temporary\n #sc2 = \" and \"\n #sc3 = \" switched\"\n #sc4 = \" in order\"\n n = len(list) #Assign length of list to variable n\n x = 0 #Start at first position in list\n while listscan(list):\n if list[x] > list[x + 1]:\n t1= list[x] #Assign both items to a variable, then reinsert in opposite positions\n t2 = list[x + 1]\n list[x + 1] = t1\n list[x] = t2\n #print(sc1 + str(x) + sc2 + str(x + 1) + sc3)\n if x + 1 < n - 1: #Only when not at end\n x = x + 1 #Move position one more right\n else: #Base case when unsorted\n x = 0 #Restart Cycle\n else: #If sorted, and more room to right, move over one, leave items in position.\n if x + 1 < n - 1:\n #print(sc1 + str(x) + sc2 + str(x + 1) + sc4)\n x = x + 1\n else: #Base case. If at end of list, and items in order, leave.\n print(sc1 + str(x) + sc2 + str(x + 1) + sc4)\n x = 0 #Restart cycle", "def rotate(l: list, n: int) -> list:\n return l[-n:] + l[:-n]", "def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n dummy = ListNode(0)\n dummy.next = head\n first = dummy\n second = dummy\n\n for i in range(n + 1):\n first = first.next\n\n while first:\n first = first.next\n second = second.next\n\n second.next = second.next.next\n\n return dummy.next", "def partial_reverse(lst, start):\n for x in lst[start:]:\n lst.insert(start,x)\n lst.pop()", "def step(self, n, dlist):\n pass", "def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n\n if not head or not head.next:\n return None\n\n first_pointer = head\n second_pointer = head\n for i in range(n):\n second_pointer = second_pointer.next\n if not second_pointer:\n return head.next\n\n while second_pointer.next:\n first_pointer = first_pointer.next\n second_pointer = second_pointer.next\n first_pointer.next = first_pointer.next.next\n\n return head", "def _move_to_head(self, node):\n self._remove_node(node)\n self._add_node(node)", "def _advance(self):\n self._prev, self._current = self._current, self._prev + self._current", "def advance_n(self,n):\n print(self)\n for i in range(n):\n self.advance_one()\n print(self)", "def step(self, move):", "def _ispinnedmove(self, from_, to_):\n return False", "def insert(self, n, new_node):\n curr_node = self.head\n \n i = 0\n while i < n:\n if curr_node.next is None:\n raise IndexError(\"list is shorter than n\")\n curr_node = curr_node.next\n i += 1\n \n new_node.next = curr_node.next\n curr_node.next = new_node\n \n return None", "def move(self):\n self._move_range_shuffle(3)\n self._move_satisfy_random_constraint()\n # self._move_range_shuffle(3)\n #if (curr_energy > 50):\n # self._move_satisfy_random_constraint()\n #else:\n # self._move_range_shuffle(3)", "def move_to_end(self, node):\n\n if self.size == 0: # no items in list\n return # there is no such node\n\n if node is self.tail: # node is already at end as it is the tail\n return # node does not need to be moved\n\n if node is not self.head:\n # node must be in the middle of list (node is neither head nor tail, list is not empty)\n node.prev.next = node.next # since node is not head, deal with node.prev\n\n else: # if node is at the beginning of the list\n self.head = node.next\n\n node.next.prev = node.prev # assign the next_node's prev pointer to point at prev_node\n self.tail.next = node # point current_tail.next at node\n node.prev = self.tail # point node.prev at current_tail\n node.next = None # assign node.next none as it will be tail (and thus at end of list)\n self.tail = node # assign node as tail", "def 
move_to(numbers, p_current, relative = False):\n if len(numbers) != 2:\n return None\n\n p_start = Point(numbers[0], numbers[1]) #first point\n if relative:\n p_start += p_current\n return p_start", "def towel_of_hanoi(n):\n degs = [[n - i for i in range(n)], [], []]\n move(degs, 0, 2, n)", "def move_step(self, move):\n # Check that the move is valid\n steps = self.mgr.obj.steps\n if len(steps) == 0:\n return\n idx = self.stepsListWidget.currentRow()\n idx_max = len(steps) - 1\n if (idx+move < 0) or (idx+move > idx_max):\n return\n \n # Insert the step at its new location, then delete it at the old location\n steps.insert(idx+move+(move>0), steps[idx])\n del steps[idx if move>0 else idx+1]\n \n self.load_steps()\n self.stepsListWidget.setCurrentRow(idx+move)\n self.mgr.changed = True", "def move_start_node(self, x, y):", "def move(t, length):\n pu(t)\n\t\n fd(t, length)\n pd(t)", "def rotate(l, n):\n return l[n:] + l[:n]", "def rotate(l, n):\n return l[n:] + l[:n]", "def move_to_end(s, n):\n first=s[0:n]\n return s[n:] + first", "def move_by(self, increment):\n return self.move_to(self.position + increment)", "def drop(lst, n): # noqa: N805\n for _ in range(n):\n try:\n lst = lst.tail\n except AttributeError:\n break\n return lst", "def findPlacesToMove():\n movesDestinations = [];\n \n curY = curBlank[0];\n curX = curBlank[1];\n\n if(curY-1 >= 1): #UP\n movesDestinations.append((curY-1, curX));\n if(curY+1 <= n): #DOWN\n movesDestinations.append((curY+1, curX));\n if(curX-1 >= 1): #LEFT\n movesDestinations.append((curY, curX-1));\n if(curX+1 <= n): #RIGHT\n movesDestinations.append((curY, curX+1));\n \n return movesDestinations;", "def shift_item_up(self, index):\n while index > 0:\n parent_index = index // 2\n if parent_index > 0 and self.heaplist[parent_index] < self.heaplist[index]:\n self.heaplist[parent_index], self.heaplist[index] = self.heaplist[index], self.heaplist[parent_index]\n index = index // 2", "def skip(self, n=None):\n while n > 0:\n try:\n self.next()\n except StopIteration:\n break\n n -= 1", "def siftDown(start, count):\n root = start\n while root * 2 + 1 < count:\n child = root * 2 + 1 # 'child' is the left children of the current node\n if child < count - 1 and self.data[child] > self.data[child + 1]:\n # Verify that right sibling is lower than the left one, if so,\n # let 'child' be the right sibling\n child += 1\n if self.data[root] > self.data[child]:\n # Swap the current child and the parent if the parent is higher than the child\n self.data[root], self.data[child] = self.data[child], self.data[root]\n root = child\n else:\n return", "def value_n_from_end(self, n):\n size = self.size()\n if n < 0:\n return \"The value passed cannot be negative\"\n if n > size:\n return \"the value passed cannot be greater than the size\"\n\n h = self.head\n\n # MY SOLUTION - O(2n) TIme O(1) Space - For a Huge List it will take more Time to Traverse 2 times.\n # idx = 0\n # remainder = size - n\n # while h is not None:\n # if idx == remainder:\n # return h.data\n # idx += 1\n # h = h.next\n\n # BETTER SOLUTION - O(n) Time and O(m) Space\n # BEST SOLUTION - Check Cracking the Coding Interview Q-2.2\n arr = list()\n while h is not None:\n if len(arr) == n + 1:\n arr.pop(0)\n arr.append(h.data)\n h = h.next\n return arr[0]", "def make_times_with_n_step(self, start, end, n):\n self.target_times = []\n step = start\n delta = old_div((end - start), float(n))\n while step <= end:\n self.target_times.append(step)\n step += delta", "def rotate(l, n=1):\n return l[n:] + l[:n]", "def 
next_move(self):\n self.branchize()\n try:\n _, new_branch = self.score_scheme.pop(0)\n except IndexError:\n return False\n\n move_list = [new_branch[\"move\"]]\n parent = new_branch[\"parent\"]\n\n while True:\n try:\n move_list.append(parent[\"move\"]) \n parent = parent[\"parent\"]\n\n except KeyError:\n break\n\n self.__originate__()\n for i in move_list[::-1]:\n self.change(i)\n\n self.current_branch = new_branch\n self.output += str(self)\n return True", "def RotateList(p_l: list, p_n: int):\n return p_l[p_n:] + p_l[:p_n]", "def moveNext(self):\n parentNode = self.parentNode\n index = parentNode.idevices.index(self)\n if index < len(parentNode.idevices) - 1:\n temp = parentNode.idevices[index + 1]\n parentNode.idevices[index + 1] = self\n parentNode.idevices[index] = temp", "def move_end_node(self, x, y):", "def set_first(self, n):\r\n self.__head = n", "def move(self, head, steps):\n self.turn(head)\n if self.direction == 0:\n self.x += int(steps)\n if self.direction == 1:\n self.y += int(steps)\n if self.direction == 2:\n self.x -= int(steps)\n if self.direction == 3:\n self.y -= int(steps)", "def move(self, step):\n for point in self.points:\n l = min(len(step), len(point.position))\n for i in range(l):\n point.position[i] = step[i]", "def backtrack_to_start(board, end):\r\n cell = board.at(end)\r\n # print(cell)\r\n path = []\r\n lis = []\r\n while cell != None:\r\n path.append(cell)\r\n cell = cell.path_from\r\n for i in path[-1:]:\r\n for j in i.position:\r\n lis.append(j)\r\n next_move = lis[-4:-2]\r\n\r\n return next_move", "def add_position_recur(lst, number_from=0):\r\n # base case empty list returns the empty list\r\n if lst == []:\r\n return []\r\n\r\n else:\r\n initial_value = lst[0]\r\n return [initial_value + number_from] + \\\r\n add_position_recur(lst[1:], number_from + 1)", "def moveNorth(self):\n north = (self.current[0], self.current[1] - 1)\n mv = self.lab.north()\n self.check_grue(mv)\n self.current = north\n self.visited.add(self.current)", "def mass_move(self,\r\n entrylist1,\r\n entrylist2):\r\n\r\n\r\n if len(entrylist1) > len(entrylist2):\r\n entrylist2 = entrylist2+list(range(entrylist2[-1]+1,\r\n (entrylist2[-1]\r\n +len(entrylist1)\r\n -len(entrylist2))))\r\n lastindexto = entrylist2[0]\r\n\r\n e1 = iter(entrylist1)\r\n e2 = iter(entrylist2)\r\n gofurther = True\r\n\r\n while gofurther:\r\n try:\r\n indexfrom = next(e1)\r\n except StopIteration:\r\n indexfrom = StopIteration\r\n try:\r\n indexto = self.find_space(next(e2), entrylist2)\r\n except StopIteration:\r\n indexto = StopIteration\r\n if indexto != StopIteration:\r\n lastindexto = indexto\r\n if indexto == StopIteration:\r\n indexto = self.find_space(lastindexto)\r\n\r\n if indexfrom != StopIteration:\r\n self.display_buffer.append(alerts.MOVING_FROM\r\n +index_reduce(str(indexfrom))\r\n +queries.TOTO+str(indexto))\r\n self.move(indexfrom, indexto)\r\n else:\r\n gofurther = False", "def addMoveToList(path, start, end):\n if start[1] == end[1]:\n if start[0] > end[0]:\n for i in range(start[0], end[0], -1):\n if [i, end[1]] in visited:\n print(\"Point: {}, {} already visited.\").format(i, end[1])\n visited.append([i, end[1]])\n path.setXY(i, end[1])\n path\n return True\n visited.append([i, end[1]])\n else:\n for i in range(start[0], end[0]):\n if [i, end[1]] in visited:\n print(\"Point: {}, {} already visited.\").format(i, end[1])\n visited.append([i, end[1]])\n path.setXY(i, end[1])\n return True\n visited.append([i, end[1]])\n\n else:\n if start[1] > end[1]:\n for i in range(start[1], end[1], 
-1):\n if [end[0], i] in visited:\n print(\"Point: {}, {} already visited.\").format(end[0], i)\n visited.append([end[0], i])\n path.setXY(end[0], i)\n return True\n visited.append([end[0], i])\n\n else:\n for i in range(start[1], end[1]):\n if [end[0], i] in visited:\n print(\"Point: {}, {} already visited.\").format(end[0], i)\n visited.append([end[0], i])\n path.setXY(end[0], i)\n return True\n visited.append([end[0], i])\n\n return False", "def reorderList(self, head: ListNode) -> None:\r\n if head == None:\r\n return head\r\n\r\n temp = head\r\n count_size = 0\r\n\r\n while temp is not None:\r\n count_size+=1\r\n temp = temp.next\r\n\r\n temp = head\r\n head1 = None\r\n break_point = 0\r\n\r\n while temp is not None:\r\n break_point+=1\r\n\r\n if count_size % 2 == 0 and break_point == count_size // 2:\r\n break\r\n elif count_size % 2 == 1 and break_point == (count_size // 2)+1:\r\n break\r\n\r\n temp = temp.next\r\n\r\n\r\n head1 = temp.next\r\n temp.next = None\r\n\r\n head1 = reverse(head1)\r\n\r\n temp1 = head\r\n ptr , ptr1 = None,None\r\n\r\n while temp1 and head1:\r\n ptr = head1\r\n head1 = head1.next\r\n ptr1 = temp1.next\r\n temp1.next = ptr\r\n ptr.next = ptr1\r\n temp1 = temp1.next.next\r\n\r\n return head", "def moveZeroes(self, n: List[int]) -> None:\n w = 0\n for r in range(len(n)):\n if n[r] != 0:\n n[r], n[w] = 0, n[r]\n w += 1", "def move(self): \n # range(start, stop, step)\n for seg_num in range(len(self.segments) - 1, 0, -1):\n new_x_position = self.segments[seg_num - 1].xcor()\n new_y_position = self.segments[seg_num - 1].ycor()\n self.segments[seg_num].goto(new_x_position, new_y_position)\n\n # moving first snake's segment 20 spaces and updating last_direction\n self.head.forward(MOVE_DISTANCE)\n self.last_direction = self.head.heading()", "def MoveUpToNode(self, node, stack, node_start):\n if not node.children: raise ValueError(\"Node has no children\");\n if node == node_start.parent:\n for xnode in reversed(stack): node.children.append(xnode);\n else:\n if not node.parent: raise ValueError(\"something is wrong\");\n self.MoveUpToNode(node.parent, stack, node_start);", "def next_n(self, n: int, fast_forward=False):\n raise NotImplementedError('Subclass must define the next_n method')", "def knightMovesForNTimes(n, x0, y0):\n x_init = x0\n y_init = y0\n # last position of the knight.\n # This is needed to tackle situations when the knight moves out of the chessboard.\n x_last = x0\n y_last = y0\n for i in range(n):\n # last position of the knight.\n # This is needed to tackle situations when the knight moves out of the chessboard.\n #x_last = x0\n #y_last = y0\n # randomly select any of the possible 8 moves of the knight\n move_dir = randint(1, 8)\n x, y = getMovePosition(move_dir, x_last, y_last)\n # update the last position of the knight\n x_last = x\n y_last = y\n #if x != -100 or y != -100:\n # when the knight is inside the chessboard\n # x_last = x\n # y_last = y\n # if the knight moves out of the chessboard, then the move is invalid\n # make a new move valid move\n #if x == -100 and y == -100:\n #else:\n # print(\"The knight moves out of the chessboard.\")\n # x, y = getMovePosition(move_dir, x_last, y_last)\n #break\n #i = i -1\n print(\"The knight moves to position ({} {}) after move# {}\\n\".format(x, y, i+1))\n\n return x, y", "def move_next(self, step=1):\n if self._index is not None and len(self) > self._index + step:\n self._index += step\n # if index >= end index of current frame --> recalculate findex\n if self._index >= self._findex * self._flen 
+ self._flen:\n self._findex += int(math.ceil(step / float(self._flen)))\n return self[self._index]\n return None", "def move(self): # AH note. Swich move with extra_steps?\n if self.adjustment < 0:\n self.position += self.extra_steps\n super().move()\n self.no_moves += 1\n # Do the regular move", "def move_to_front(self, node):\n\n if self.size == 0: # no items in list\n return # nothing to move; return out\n\n if self.head is node: # if node is head already\n return # nothing to move, node is at beginning; return out\n\n if self.tail is node: # if node is tail\n self.tail = node.prev # shift tail left\n\n else: # else node must not be tail\n # if node is not tail, then node.next is not None\n node.next.prev = node.prev # sew node.next to node.prev\n\n node.prev.next = node.next # if node=tail next is None; else, next is a node. Works either way!\n self.head.prev = node # assign current head's prev to point at node instead of None\n node.next = self.head # place node before head\n self.head = node # reassign head (shifting left) head is now node\n self.head.prev = None # reassign head.prev to point at None (no nodes before head)", "def move(self, direction):\n head = self.snake[0]\n delta = self.dirs[direction]\n nextMove = [head[0] + delta[0], head[1] + delta[1]]\n if not self.isValidMove(nextMove):\n return -1\n\n if self.food and nextMove == self.food[0]:\n self.food.popleft()\n else:\n self.snake.pop()\n\n self.snake.appendleft(nextMove)\n\n return len(self.snake) - 1", "def _move_head(self, cmd):\n self.move_head(cmd.data)", "def _shift_up(self, idx):\n\n parent = (idx - 1) // 2\n while parent >= 0 and self.value(parent) < self.value(idx):\n self.items[parent], self.items[idx] = self.items[idx], self.items[parent]\n idx = parent\n parent = (idx - 1) // 2", "def get_next_position(self):", "def move(self, n):\n return self.file.seek(n, 0)", "def move(state, pos, rel_pos):\n new_state = state.copy()\n return swap(new_state, pos, pos + rel_pos)", "def move_to_head(self, node):\n if node is self.head:\n return\n value = node.value\n self.delete(node)\n self.add_to_head(value)", "def next_move(self, board):\n self.num_moves += 1\n \n \n ################### TODO: ######################################\n # Implement your strategy here. 
\n # Feel free to call as many as helper functions as you want.\n # We only cares the return of this function\n ################################################################", "def skipO2n(self, li: list, fromIndex):\r\n for _ in range(li[fromIndex]):\r\n skipO = random() < self.prob_skipsO2n\r\n if skipO:\r\n li[fromIndex + 2] += 1\r\n else:\r\n li[fromIndex + 1] += 1\r\n \r\n return li", "def chunk(lst, n):\n return [lst[i:i + n] for i in range(0, len(lst), n)]", "def move_to_position1(self):", "def move_element(self,n_a,n_b):\n self.element_array.insert(n_b,self.element_array.pop(n_a))", "def pop(self, n: int = 1) -> None:\n assert n > 0\n first_self = self\n last_self = self.move(n-1)\n\n first_other = last_self.forward\n last_other = first_self.backward\n \n last_other._join(first_other)\n last_self._join(first_self)", "async def make_move(index):\n if enough_players():\n GAME.make_move(index)\n await update_players()", "def move(self, x=0, y=0):\n\n # Skip stupid moves\n if x == 0 and y == 0:\n return\n\n # Add a new marker (which may move the most recent marker)\n if len(self.__points) == 0:\n self.add(self.x, self.y)\n p = self.__points[len(self.__points) - 1]\n self.x = p[0] + x\n self.y = p[1] + y\n self.add(self.x, self.y)\n\n # Calculate total length\n if self._total_length > self.length:\n self.prune()", "def set_of_moves_a(tk, max_count=0):\n # Choose an orientation. Repeat if no possible move. Do this even\n # on first entry, just in case someone else has been drawing solid\n # lines.\n this_increment = 2 # Hard coded for clarity - must be < MIN_MOVE\n yield_count = 0\n yield_increment = 1 if max_count else 0\n available_moves = range(len(POSSIBLE_ROTATION))\n target_distance = random.randint(MIN_MOVE, MAX_MOVE)\n heading = tk.heading()\n while len(available_moves) and yield_count <= max_count:\n head_index = random.choice(available_moves)\n rotation = POSSIBLE_ROTATION[head_index]\n del available_moves[available_moves.index(head_index)]\n tk.setheading(heading) # Back to starting direction\n tk.left(rotation) # And try this candidate\n offered_distance = available_distance(tk, target_distance,\n this_increment)\n if offered_distance >= MIN_MOVE:\n # Found a valid move\n yield_count += yield_increment\n yield tk.heading(), offered_distance\n # Back in after processing - reset choices\n available_moves = range(len(POSSIBLE_ROTATION))\n target_distance = random.randint(MIN_MOVE, MAX_MOVE)\n heading = tk.heading()\n #+--\n raise StopIteration", "def test_find_knight_next_moves_limit_of_board(self):\n result = _find_knight_next_moves('h1')\n self.assertEqual(['f2', 'g3'], result)", "def reorderList(self, head: ListNode) -> None:\n if not head:\n return \n # Find the middle node\n slow = fast = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n \n # reverse the second half\n prev = None\n while slow:\n temp = slow\n slow = slow.next\n temp.next = prev\n prev = temp\n \n start = head\n while prev.next:\n start.next, start = prev, start.next\n prev.next, prev = start, prev.next", "def move(self):\n self.pos += self.direc\n self.nearest_node = self.pixel_to_node()", "def move_forward():\n pass", "def value_n_from_end(self, n):\n # check the validity of the input\n if n > self.n-1:\n print(f\"Error; n is greater than the length of the list = {self.n-1}\") \n return\n \n temp_node = self.head # store head\n for _ in range((self.n-1) - n):\n temp_node = temp_node.next # traverse the list\n return temp_node.val", "def hollow(t, n):\n lt(t)\n skip(t, n)\n 
rt(t)", "def move_to_position2(self):", "def reorderList(self, head: ListNode) -> None:\n if not head:\n return None\n slow = fast = head\n while True: # find the middle node\n if not fast.next or not fast.next.next:\n break\n fast = fast.next.next\n slow = slow.next\n head_1 = head # first half of linked list\n head_2 = self.rev(slow.next, None)[0] if slow.next else None # second half\n slow.next = None\n # build result with above two halves\n dummy = runner = ListNode(0)\n while head_2:\n runner.next = head_1 # order matters!!!\n head_1 = head_1.next\n runner = runner.next # runner at head_1\n runner.next = head_2 # point head_1's next to head_2\n head_2 = head_2.next\n runner = runner.next # runner at head_2\n if head_1: # first half may have 1 additional node\n runner.next = head_1\n head_1.next = None\n return dummy.next", "def get_next_moves(board, player):\r\n\r\n if player == 'hare':\r\n moves = []\r\n next_moves = []\r\n\r\n (row_from, col_from) = get_hare_positions(board)\r\n moves = possible_moves_list(row_from, col_from)\r\n\r\n for move in moves:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_from, col_from, row_to, col_to):\r\n \"\"\" if move is allowed then add to list of next moves\"\"\"\r\n next_moves.append(move)\r\n\r\n return next_moves\r\n\r\n else:\r\n \"\"\" for individual hounds\r\n get next moves\"\"\"\r\n moves = []\r\n next_moves_hound1 = []\r\n next_moves_hound2 = []\r\n next_moves_hound3 = []\r\n\r\n (row_hound_1, col_hound_1), (row_hound_2, col_hound_2), (row_hound_3, col_hound_3) = get_hound_positions(board)\r\n moves_hound1 = possible_moves_list(row_hound_1, col_hound_1)\r\n moves_hound2 = possible_moves_list(row_hound_2, col_hound_2)\r\n moves_hound3 = possible_moves_list(row_hound_3, col_hound_3)\r\n\r\n for move in moves_hound1:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_1, col_hound_1, row_to, col_to):\r\n next_moves_hound1.append(move)\r\n\r\n for move in moves_hound2:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_2, col_hound_2, row_to, col_to):\r\n next_moves_hound2.append(move)\r\n\r\n for move in moves_hound3:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_3, col_hound_3, row_to, col_to):\r\n next_moves_hound3.append(move)\r\n\r\n return (next_moves_hound1, next_moves_hound2, next_moves_hound3)", "def insert(self, n, pos):\n if pos == 0:\n self.cons(n)\n else:\n prev = self.index(pos-1)\n next = prev.next\n prev.next = n\n n.next = next\n self.len += 1", "def random_walk_nextindex(currentindex, npoints, maxstepsize=1, neighborhoods=None):\n # neighborhood of current point...\n if neighborhoods:\n # ... given as input\n ineighbours = neighborhoods[currentindex]\n else:\n # ... 
all points of the grid separated by up to\n # *maxstepsize* from current point\n ineighbours = neighborhood(currentindex, npoints, maxdist=maxstepsize)\n\n # proposed move, chosen equiprobably amongst neighbours\n u = sample_uniform()\n nextindex = ineighbours[int(u * len(ineighbours))]\n\n # nb of neighbours of proposed point\n if neighborhoods:\n nnextneighbours = len(neighborhoods[nextindex])\n else:\n dist2edge = min(nextindex, npoints - 1 - nextindex)\n nnextneighbours = maxstepsize + min(maxstepsize, dist2edge)\n\n # the move is accepted with probability\n # P = min(1, nb current neighbours / nb next neighbours)\n P = float(len(ineighbours)) / float(nnextneighbours)\n return nextindex if (P >= 1 or sample_uniform() < P) else currentindex", "def forward(self):\n self.position += 1", "def move_items(self):\n self.set_fibonnaci_levels()", "def _move(self, direction, difference):\n future_tile_number = self.get_number() + difference\n if future_tile_number in range(1, Tile.total_tiles + 1):\n future_tile = Tile.get_tile(future_tile_number)\n if future_tile.walkable:\n self.set_target(future_tile)\n self.rotate(direction)", "def next_n(self, n, fast_forward=False):\n return list(it.islice(self.gen, n))" ]
[ "0.70314944", "0.6992453", "0.68459344", "0.66120696", "0.66120696", "0.6502313", "0.64976275", "0.6037973", "0.59619963", "0.592741", "0.5908573", "0.5853676", "0.58296", "0.58181906", "0.5809934", "0.5785236", "0.5776495", "0.57708544", "0.57155955", "0.5683604", "0.56449467", "0.56443304", "0.5632337", "0.5593683", "0.5584496", "0.55796546", "0.55425453", "0.5531612", "0.55280566", "0.5520254", "0.5513582", "0.55129707", "0.5498655", "0.54609334", "0.54531664", "0.54460776", "0.54307246", "0.54307246", "0.5414948", "0.54051185", "0.53864634", "0.53720915", "0.53601015", "0.534353", "0.5342886", "0.5342431", "0.53387797", "0.5337152", "0.53330725", "0.5327983", "0.5327415", "0.532561", "0.53230363", "0.5318225", "0.53034306", "0.5303285", "0.53015906", "0.52972984", "0.5292871", "0.52874076", "0.5277766", "0.5265708", "0.5263872", "0.52619475", "0.52574825", "0.5254916", "0.524808", "0.5246269", "0.524429", "0.52410424", "0.5236945", "0.52321404", "0.5223891", "0.5223702", "0.5223146", "0.5222696", "0.5216621", "0.521405", "0.52123713", "0.52082473", "0.5204217", "0.5196741", "0.5193365", "0.51925695", "0.519072", "0.51861584", "0.51784796", "0.5166805", "0.5163053", "0.5162017", "0.5157936", "0.5156769", "0.51547164", "0.51497084", "0.5148664", "0.5147058", "0.51461655", "0.5144006", "0.514387", "0.5140329" ]
0.64244884
7
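The positive document for this query depends on bookkeeping attributes (cur_pos, length, start_node) that the record leaves implicit. A stripped-down sketch of the same wrap-around idea on an assumed three-node chain (the class and function names here are illustrative assumptions):

class Node:
    def __init__(self, data, next_node=None):
        self.data = data
        self.next = next_node


def advance_circular(head, current, steps):
    # Move `steps` places from `current`; past the tail, continue from `head`.
    node = current
    for _ in range(steps):
        node = node.next if node.next is not None else head
    return node


c = Node("c")
b = Node("b", c)
a = Node("a", b)
print(advance_circular(a, a, 4).data)  # a -> b -> c -> a -> b, prints "b"

Reducing steps modulo the list length first, as the dataset's move_circular does, avoids walking the chain more than once per full lap.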
Get the data of the next node
def get_next(self):
    return self.cur_node.next.data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next(node):\n return node['next']", "def data(self):\n return self.first_node.data", "def get_data(node):\n return node['data']", "def node_data(self):\n return self.node_data_", "def _next(self):\n node = self.head\n while node != None:\n yield node.data\n node = node.right", "def next_data(self):\n return self.data.pop()", "def next_node(self):\n return self.__next_node", "def next_node(self):\n return self.__next_node", "def next_node(self):\n return self.__next_node", "def get_data(self):\n self._zmq_request.send(b'next')\n return msgpack.loads(self._zmq_request.recv())", "def next_node(self):\n\n return self.__next_node", "def get_next(self):\n return self.next", "def get_next(self):\n return self.next", "def get_next(self) -> dict:\n raise NotImplementedError", "def traverse_forward(self) -> str:\n data = \"\"\n current = self.head\n while current is not None:\n data += f\"{current.get_data()} \"\n current = current.get_next_node()\n return data", "def get_next_node_address(self):\n result = self.other_nodes[self.current_node]\n self.current_node = (self.current_node + 1) % self.other_nodes_len\n return result", "def __init__(self, data, next_node=None): #self.next_node ??\n self.data = data\n self.next_node = next_node", "def getNext(self):\n\t\t\treturn self.next", "def getNext(self):\n return self.__nextListNode", "def peek(self):\n return self.head.data", "def get_next(node, offset):\n row, column = node\n row_offset, column_offset = offset\n return row + row_offset, column + column_offset", "def get_next_item(self):\n pass", "def get(self, index):\r\n if index >= self.length():\r\n print(\"ERROR\")\r\n return None\r\n current_index = 0\r\n current_node = self.head\r\n while True:\r\n current_node = current_node.next\r\n if current_index == index: return current_node.data\r\n current_index += 1", "def read_index(self, index):\n current = self.head\n if index == 0:\n return current.data\n elif index >= self.size() :\n return None\n else:\n position = 0\n while position < index:\n current = current.next_node\n position += 1\n return current.data", "def getNext(self):", "def next(self):\n return self.first_node.next", "def getNodeRRDData(self,node):\n data = self.connect('get','nodes/%s/rrddata' % (node),None)\n return data", "def getNext(self):\n return self.__next", "def _get_next_nodes(self):\n next_nodes = self.data[5] if not is_nan(self.data[5]) else \"eos\"\n if is_nan(next_nodes):\n next_nodes = \"eos\"\n return next_nodes", "def first(self):\r\n if self.head == None: #check if first(head) node is empty\r\n return 'null' #if yes, then return null\r\n else: #if it is not empty\r\n return self.head.data #return the data of head node\r", "def next(self):\n return self.my_next", "def next(self):\n return self.read_message()", "def next(self) -> int:\n self.pointer += 1\n return self.ordered_nodes[self.pointer-1]", "def __init__(self, data, next_node = None):\n self.data = data\n self.next_node = next_node", "def last(self):\r\n if self.tail == None: #check if last(tail) node is empty\r\n return 'null' #if yes, then return null\r\n else: #if it is not empty\r\n return self.tail.data #return the data of tail node\r", "def get_next(self):\n return self._next_previous_helper('next')", "def _next(self):\n i = 0\n while i < self.size:\n yield self.data[i]\n i += 1", "def next(self):\r\n return self.__next", "def peek(self):\n if self.is_empty():\n return None\n\n return self.linked_list.head.data", "def next(self):\n return self._next", "def next(self):\n nxt = 
self.readentry()\n if nxt is None:\n raise StopIteration\n return nxt", "def next(self):\n return self.__next", "def __init__(self, data):\n\n self.next = None\n \"\"\" Next text \"\"\"\n\n self.data = data\n \"\"\" Data text \"\"\"", "def get(self):\r\n return self.data[self.cur:]+self.data[:self.cur]", "def get(self):\n return self.data[self.cur:]+self.data[:self.cur]", "def next_element(self):\n return self.extract_element()", "def __init__(self, data, next=None):\n self.data = data\n self.next = next", "def value_at(self, index):\n if self.empty():\n return \"Linked List Empty\"\n\n idx = 1\n l = self.head\n while l.next is not None:\n if idx is index:\n break\n\n l = l.next\n idx += 1\n return l.data", "def __init__(self, data, next_node=None):\n self.data = data\n self.next_node = next_node", "def __init__(self, data, next_node=None):\n self.data = data\n self.next_node = next_node", "def __next__(self) -> object:\n if not self.current_node:\n raise StopIteration\n\n current_node_value = self.current_node.value()\n self.current_node = self.current_node.next()\n return current_node_value", "def _follow_next(self, url):\n response = self._json(self._get(url), 200)\n if response is not None:\n data = response['data']\n\n next_url = self._get_attribute(response, 'links', 'next')\n while next_url is not None:\n response = self._json(self._get(next_url), 200)\n if response is not None:\n data.extend(response['data'])\n next_url = self._get_attribute(response, 'links', 'next')\n\n return data\n else:\n return None", "def traverse(self):\n current = self.head\n while current is not None:\n print current.value\n current = current.next", "def __init__(self, data):\n self.data = data\n self.next = None", "def next(self) -> object:\n return self._next", "def next(self) -> int:\n self.index += 1\n return self.nodes_sorted[self.index]", "def next(self) -> int:\n node = self.list.pop()\n t = node.right\n while (t):\n self.list.append(t)\n t = t.left\n\n return node.val", "def value(self):\n return self.head", "def _next(self):\n i = 0\n while i < self.size:\n if self.data[i] != None:\n yield self.data[i]\n i += 1", "def next(self, data):\n\n if \"tag\" not in data.attrs:\n tag = self.count\n self.count += 1\n else:\n tag = data.attrs[\"tag\"]\n\n fname = \"%s_%s.h5\" % (self.root, str(tag))\n\n data.to_hdf5(fname)\n\n return data", "def next(self) -> str:\n raise NotImplementedError", "def next(self):\n node = self.stack.pop()\n self.pushLeft(node.right)\n return node.val", "def back(self):\n if self.empty():\n return \"Linked List is Empty\"\n\n h = self.head\n while h.next is not None:\n h = h.next\n\n return h.data", "def getNext(self):\n return self.__next__", "def __init__(self, data, next = None):\n\t\tself.data = data\n\t\tself.next = next", "def get_next_node(self):\n # Implemented from template for osid.resource.ResourceList.get_next_resource\n return next(self)", "def get_next(self) -> float:\n return self._current + self._offset", "def dataNode(self, role):\n return None", "def get_next(self):\n try:\n return self.the_input[self.index]\n except IndexError:\n return None", "def next(self):\r\n pass", "def next_(self, node):\n \n try:\n return list(self.suffix[node].keys())[0]\n except:\n print(\"ERROR: There is no outdegree edge\")", "def next(self):\n raise NotImplementedError", "def peek(self):\n if self.__size == 0:\n return None\n else:\n return self.__head.get_data()", "def next(self):\n if not self.iter_next():\n self.reset()\n self.data, self.label = self._read()\n 
self.train_cursor = self.train_cursor + self.batch_size\n return {self.data_name: self.data[0][1], self.label_name: self.label[0][1]}", "def get_next():\n return \"some_value\"", "def get_first(self):\n if self.is_empty():\n raise self.NoSuchNodeException()\n\n return self.head.data", "def get_next(self):\n return self.childs", "def next(self):\n pass", "def next(self):\n pass", "def next(self):\n pass", "def next(self):\n pass", "def __init__(self, data):\n self.data = data\n self.next = None", "def __init__(self, data):\n self.data = data\n self.next = None", "def __iter__(self):\n node = self.head\n while node is not None:\n yield node._data\n node = node._next", "def next():", "def next():", "def get_next_page(self, data):\n\n next_page = None\n\n if \"d\" in data:\n logger.debug(f\"'d' found (OData v2).\")\n if \"__next\" in data[\"d\"]:\n logger.debug(f\"'d.__next' found\")\n next_page = data[\"d\"].get(\"__next\")\n elif \"value\" in data:\n logger.debug(f\"'value' found (OData v3 or v4).\")\n if \"odata.nextLink\" in data:\n logger.debug(f\"'odata.nextLink' found (Odata v3).\")\n next_page = data.get(\"odata.nextLink\")\n elif \"@odata.nextLink\" in data:\n logger.debug(f\"'@odata.nextLink' found (Odata v4).\")\n next_page = data.get(\"@odata.nextLink\")\n else:\n logger.debug(f\"No more pages.\")\n\n return next_page", "def next(self) -> int:\n while (self.stack or self.node):\n if self.node:\n self.stack.append(self.node)\n self.node = self.node.left\n else:\n self.node = self.stack.pop()\n res = self.node.val\n self.node = self.node.right\n return res", "def getData(self):\n data = self.rx.getNData()\n\n return (data)", "def next(self):\n if self.index == len(self.nodes):\n raise StopIteration()\n node = self.nodes[self.index]\n # null nodes are inputs to the network.\n while node[\"op\"] == \"null\":\n self.index += 1\n node = self.nodes[self.index]\n\n inputs = node[\"inputs\"]\n pre_node = [self.nodes[item[0]][\"name\"]\n for item in inputs if\n not self.nodes[item[0]][\"name\"].endswith('_weight') and\n not self.nodes[item[0]][\"name\"].endswith('_bias')]\n self.index += 1\n if \"attr\" not in node:\n node[\"attr\"] = None\n else:\n node[\"attr\"] = node[\"attr\"]\n return {'type': node[\"op\"],\n 'name': node[\"name\"],\n 'inputs': pre_node,\n 'attr': node[\"attr\"]}", "def peek(self):\n return self.list.head.data", "def next(self):\n return self.filenum(), self.linenum(), self.tos().next()", "def get_data(self): # TODO: add smooth possibility\n return self.data", "def data(self):\n return self._data", "def next(self) -> int:\n node = self.stack.pop()\n ans = node.val \n self._sink(node.right)\n return ans", "def __init__(self, data=None, next=None):\n self.data = data\n self.next = next", "def __next__(self):\n if self.n < len(self.cellData):\n self.n += 1\n return self.cellData[self.n - 1]\n else:\n raise StopIteration", "def get_element(self, pos):\n curr = self.head\n count = 1\n\n while curr != None:\n if count == pos:\n return curr.data\n\n curr = curr.link\n count += 1\n return None", "def next_address():\n\t\tkeylist = vessel_list.keys()\n\t\tcurrentkey = keylist.index(str(node_id))\n\t\treturn vessel_list[keylist[(currentkey+1)%len(keylist)]]", "def value_at(self, index):\n if index==0:\n return self.head.val\n\n temp_node = self.head\n for _ in range(index):\n temp_node = temp_node.next\n return temp_node.val" ]
[ "0.7340955", "0.72560537", "0.71013474", "0.70286286", "0.70277715", "0.6898941", "0.67155683", "0.67155683", "0.67155683", "0.6683656", "0.6547143", "0.65334487", "0.65334487", "0.64254665", "0.6382568", "0.63816816", "0.63672584", "0.63516414", "0.630895", "0.6291807", "0.6252606", "0.62454075", "0.6210823", "0.619794", "0.61662865", "0.6164937", "0.61575896", "0.61573625", "0.6126518", "0.6120131", "0.60885453", "0.6075575", "0.60752827", "0.6047047", "0.60331994", "0.602574", "0.6019453", "0.60103345", "0.5993397", "0.5989314", "0.5982564", "0.5981615", "0.59697926", "0.59683084", "0.59661704", "0.5936369", "0.5935162", "0.5927912", "0.59262806", "0.59262806", "0.59225756", "0.5921911", "0.5901041", "0.587669", "0.5876323", "0.5873773", "0.5863947", "0.58608174", "0.5855229", "0.5851263", "0.5841911", "0.5840543", "0.58268934", "0.5823199", "0.5813606", "0.5811977", "0.5803527", "0.5800124", "0.5775467", "0.57719433", "0.57708424", "0.576945", "0.5766084", "0.57633793", "0.5758983", "0.5754609", "0.57497287", "0.5745066", "0.5745066", "0.5745066", "0.5745066", "0.5734479", "0.5734479", "0.57334197", "0.5733052", "0.5733052", "0.56921536", "0.5691999", "0.56904155", "0.56893754", "0.5688088", "0.56863093", "0.56853664", "0.56853616", "0.5677572", "0.56628764", "0.56608766", "0.5659412", "0.56477684", "0.5644225" ]
0.8156502
0
Constructor for the twister
def __init__(self, init):
    self.stepforward = int(init)
    self.data = Linkedlist()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, user_ctor, seed=0):\n self._user_ctor = user_ctor\n self._seed = seed\n self.reset_sampler()", "def __init__(self, seed):\n self.mt = MT19937(seed & 0xFFFF)\n self.keystream = []", "def __init__(self, seed=__default):\n\n seed = self.__default if seed == 0 else seed\n self.__mt[0] = seed & self.__genm\n for i in range(1, self.__n):\n self.__mt[i] = (self.__genp * self.__mt[i-1]) & self.__genm", "def __init__(self, a, b, t):\n\t\tself.a = a\n\t\tself.b = b\n\t\tself.t = t", "def __init__(self, seed, triple=None):\n\n self.triple = triple or self.DEFAULT_SHIFTS\n \"\"\"The triple used by the instance.\"\"\" # Attribute docstring\n\n self._seed = numpy.uint32(seed)\n self._last_rand = self._seed", "def __init__(self):\n\n # Set the seed to 0\n random.seed(0)", "def __init__(self):\n\n #call super class's __init__ method\n super(TDecaySampler, self).__init__(name=\"tdecay\", observed=False)", "def __init__(self, a, b, *args, **kwargs):\n self.a = a\n self.b = b\n super(Uniform, self).__init__(*args, **kwargs)", "def __init__(self):\n self.users = defaultdict(set)\n self.users_tweet = {}\n self.uid = 0", "def __init__(self, seed, name, email, verb, resource, rate, slo, window):\n self.name = name\n self.email = email\n self.verb = verb\n self.resource = resource\n self.rate = rate\n self.error_dist = UniformDist(seed, 0, 1.0)\n self.error_prob = 0.0\n self.bugged = False\n self.slo = slo\n self.window = window\n secs_per_request = 1.0 / self.rate\n # This distribution introduces jitter in the times\n tolerance = secs_per_request * 0.1\n self.time_dist = UniformDist(seed, secs_per_request - tolerance, secs_per_request + tolerance)", "def __init__(self):\n self.turn_count = 0\n self.targets = {}\n self.uuid_to_target = {}\n self.nest_points = set()\n self.nest_avoid_points = set()", "def __init__(self, initial, funcs, theta, noise, dt=1):\n super().__init__(initial, funcs, theta, noise)\n\n self.dt = dt", "def __init__(self, iter = 10):\n #We start calling the base constructor\n super(RandAlgo,self).__init__()\n #We then define the algorithm 'private' data members\n self.__iter = iter", "def __init__(self, walk_length, p, q, neighbours, lengths, offsets, seed=0):\n self.seed, self.seed2 = random_seed.get_seed(seed)\n self.neighbours = neighbours\n self.lengths = lengths\n self.offsets = offsets\n self.walk_length = walk_length\n self.p = p\n self.q = q\n super(BiasedRandomWalkDataset, self).__init__()", "def __init__(self, influencers, credentials, similarity_parameter, popularity_parameter, epsilon):\n \n # Twitter API credentials initialization\n auth = tweepy.OAuthHandler(credentials['consumer_key'], credentials['consumer_secret'])\n auth.set_access_token(credentials['access_token'], credentials['access_token_secret'])\n self.api = tweepy.API(auth)\n\n # Class fields\n self.username = credentials['username']\n self.influencers = influencers\n self.complete_model = None\n self.influencer_models = None\n self.userTweetsStat = {}\n self.similarities = {}\n self.similarity_parameter = similarity_parameter\n self.popularity_parameter = popularity_parameter\n self.epsilon = epsilon\n self.valueState = {influencer: 0 for influencer in self.influencers.allInfluencers}\n self.reward = 1\n self.rewardParam = 0.1\n self.alpha = 0.1\n self.gamma = 1\n self.curDif = 0", "def __init__(self, trainer):\n self.trainer = trainer", "def __init__(self):\n\n #call super class's __init__ method\n super(TRiseSampler, self).__init__(name=\"trise\", observed=False)", "def __init__(self, 
walk_length, neighbours, lengths, offsets, seed=0):\n self.walk_length = walk_length\n self.seed, self.seed2 = random_seed.get_seed(seed)\n self.neighbours = neighbours\n self.lengths = lengths\n self.offsets = offsets\n super(RandomWalkDataset, self).__init__()", "def __init__(self, weight, attributes, first_incident_node, second_incident_node):\n self.weight = weight # initialize all necessary fields\n self.attributes = attributes\n self.first_incident_node = first_incident_node\n self.second_incident_node = second_incident_node", "def get_twist(self):\n twist = Twist()\n twist.linear.x = self.x\n twist.linear.y = self.y\n twist.angular.z = self.theta\n return twist", "def __init__(self):\n self.timer = itertools.count(step=-1)\n self.tweets = collections.defaultdict(collections.deque)\n self.followees = collections.defaultdict(set)", "def __init__(self, bias):\n self.bias = bias", "def __init__(self, p=0.5):\n assert 0. <= p <= 1.\n self.p = p\n self.rng = T.shared_randomstreams.RandomStreams(seed=123456)\n self.params = []", "def __init__(self, transition_matrix, seed=0):\n self.transition_matrix = transition_matrix\n\n # Set up the RNG\n super().__init__(seed)", "def __init__(self):\n self.timer = itertools.count(0, -1)\n self.tweets = collections.defaultdict(collections.deque) # map userId to tweets\n self.followees = collections.defaultdict(set) # map userId to its followees", "def __init__(self):\n super(uidGenerator,self).__init__()", "def __init__(self, a):\n\n self.member = a", "def __init__(self, *args, **kwargs):\n super(jDE, self).__init__(*args, **kwargs)\n for i in range(self.population.size):\n self.population.members[i].f = 0.1 + 0.9 * numpy.random.rand()\n self.population.members[i].cr = numpy.random.rand()", "def __init__(self, tp):\n self.tp = tp", "def __init__ (self) :", "def __init__(self, dist, samples, poly=None, rule=\"random\"):\n self.dist = dist\n samples_ = chaospy.generate_samples(2 * samples, domain=len(dist), rule=rule)\n self.samples1 = samples_.T[:samples].T\n self.samples2 = samples_.T[samples:].T\n self.poly = poly\n self.buffer = {}", "def __init__(self) -> None:\n\n super(Levenshtein, self).__init__()", "def __init__(self, walker, targets=None):\n super(MoveWalkerToRandomTarget, self).__init__(walker)\n self._targets = targets\n self._target_to_move_to = None", "def __init__(self, rng):\n self.rng = rng\n self.state = None", "def __init__(self, value, direction=None):", "def __init__(self, coor1, coor2):\n self.coor1 = coor1\n self.coor2 = coor2", "def __init__(self, transformer):\n self.transformer = transformer", "def __init__(self, corpus, seed=None):\n super().__init__()\n self.corpus = corpus\n self.seed = seed\n self.idxs = list(range(len(corpus)))\n self.shuffle(seed)", "def __init__(self,\n seed: Union[None, int, str, bytes] = None,\n *args, **kwargs) -> None:\n self.seed = seed\n self._seed = c.text_to_int(self.seed)\n self._rng = default_rng(self._seed)\n super().__init__(*args, **kwargs)", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(self, random_state):\n self.random_state = random_state\n self.random_generator = RandomState(self.random_state)", "def __init__(self):\n super(SiLU, self).__init__()", "def __init__(self):\n\t\tself.upper, 
self.lower = 0,0\n\t\tself.timestamp = 0", "def __init__(self):\n self.word_to_freq = {}\n self.head = Node()\n self.tail = Node()\n self.head.next = self.tail\n self.tail.prev = self.head\n self.freq_to_node = {0: self.head, sys.maxint: self.tail}", "def initialize(cls):", "def __init__(self, num_features=NUM_FEATURES, num_samp=NUM_SAMP):\n num_samp = NUM_SAMP\n sigma = 0.1\n np.random.seed(31415)\n\n # We're going to learn these paramters\n self.w = np.random.randint(low=0, high=5, size=(num_features, 1))\n self.b = 2\n\n self.index = np.arange(num_samp)\n self.x = np.random.uniform(size=(num_samp, num_features))\n self.y = self.x @ self.w + self.b + sigma * np.random.normal()", "def __init__(self):\n # 保存用户推特数据\n self.user_pool = defaultdict(UserInfo)\n self.twitter_pool = defaultdict(list)\n self.time = 0", "def __init__(self):\n\n super().__init__(2)", "def __init__(self, parant):\n pass", "def __init__ (self):\n pass", "def __init__(self, *args):\n _snap.TRnd_swiginit(self, _snap.new_TRnd(*args))", "def __init__(self):\n self.x_y_token_triplets = []", "def __init__(self):\n super().__init__()\n\n # Will only reply to every 3rd or so tweet, defined in settings\n self.received_tweet_count = 0\n\n # Twitter api init\n self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n self.auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n self.twitter_api = tweepy.API(self.auth)\n\n print('Authenticated, creating stream...')\n\n self._init_stream()", "def __init__(self, t1, t2, tmax, Lmax):\r\n object.__init__(self)\r\n self.t1 = t1\r\n self.t2 = t2\r\n self.tmax = tmax\r\n self.Lmax = Lmax", "def __init__(self):\n self.py = random.getstate()\n self.np = np.random.get_state()\n self.torch = torch.get_rng_state()", "def __init__(self, weights):\n self._weights = weights", "def __init__(self, go):\n Brain.__init__(self, go, next_thought = self.go_till_wall)", "def __init__(self, seed):\n\n seed = params.integer(seed, from_=0, to=2 ** 32 - 1)\n self._random = np.random.RandomState(seed=seed)", "def __init__(self, input, output) :\n Gate.__init__(self, [input], output)", "def __init__(self):\n self.subscribers = {}\n self.followers = {}\n self.nackables = {}\n self.threads = []", "def __init__(self):\n # initialize a bird to default values.\n self.set_instance_vars()\n\n # randomize some parameters, such as starting height\n self.pos_y = self.random_height()\n\n # tag each bird\n\n self.identifier = Bird.num_birds\n\n # create ai net for each bird\n self.initialize_ai()\n\n # increment Bird counter\n Bird.num_birds += 1\n\n # remember time of birth\n self.birth_time = 0", "def __init__(self, hydrometer):\n self.hydrometer = hydrometer", "def __init__(self, aeropuertos):\n self.iter = iter(aeropuertos)", "def __init__(self):\n self._trials = [] # private variables _ convention", "def __init__(self, seed=None):\n self.seed(seed)\n self.hashfun = \"SHA-256\"\n self._basehash()", "def __init__(self,*args):\n pass", "def __init__(self, rate):\n super(RandomWander, self).__init__()\n self.iteration = 0\n self.rate = rate\n self.speed = 0\n self.heading = 0", "def __init__(\n self,\n wrapper,\n embedder,\n transformer,\n ):\n dims = (\n wrapper.num_people,\n wrapper.num_samples,\n wrapper.num_people - 1,\n )\n super().__init__(wrapper, dims)\n self.embedder = embedder\n self.transformer = transformer", "def __init__(self, bits): \n self.n = self.generarN(bits)\n length = self.bitLen(self.n)\n seed = random.getrandbits(length)\n self.semilla(seed)", "def __init__(self, s, a, b, c, d, t = None):\n 
self.s = s\n self.a = a\n self.b = b\n self.c = c\n self.d = d\n self.t = t", "def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object", "def __init__(self,nback=1,ntokens_pm=2,ntokens_og=3,stimdim=2,seed=99):\n np.random.seed(seed)\n tr.manual_seed(seed)\n self.nback = nback\n # embedding\n self.ntokens_pm = ntokens_pm\n self.ntokens_og = ntokens_og\n self.stimdim = stimdim\n # emat\n self.randomize_emat()\n return None", "def __init__(self, numInputs, numOutputs, randomize=False, bias=False):\n super().__init__(numInputs, numOutputs, randomize, bias)\n self.iterations = 0", "def __init__(self, *args):\n _snap.TSStr_swiginit(self, _snap.new_TSStr(*args))", "def __init__(self, weights, biases):\n super().__init__()\n\n self.weights = weights\n self.biases = biases", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass" ]
[ "0.65222347", "0.63090914", "0.62000424", "0.6192566", "0.614152", "0.612248", "0.6101128", "0.60043067", "0.59858197", "0.59820634", "0.5972612", "0.5955955", "0.595311", "0.59528494", "0.5922501", "0.5905344", "0.5901768", "0.58944374", "0.5858806", "0.58463246", "0.5831957", "0.58284014", "0.58200043", "0.581978", "0.5817637", "0.58165276", "0.58119786", "0.5794607", "0.57937515", "0.579215", "0.578616", "0.57803833", "0.57777464", "0.5777372", "0.5773326", "0.57720274", "0.5769359", "0.5768655", "0.576231", "0.5753744", "0.5753744", "0.5753744", "0.5753744", "0.5753744", "0.5753744", "0.5753744", "0.5753744", "0.5753744", "0.5753744", "0.5726433", "0.57151395", "0.5708646", "0.5704596", "0.5697987", "0.56955516", "0.56940794", "0.5690041", "0.56826705", "0.5681057", "0.5680482", "0.5679545", "0.567823", "0.5673757", "0.56712157", "0.5670876", "0.56685835", "0.56668496", "0.56630063", "0.5658467", "0.56568646", "0.5652175", "0.56514126", "0.56496125", "0.5647689", "0.5646903", "0.56464845", "0.5646103", "0.56449676", "0.5639318", "0.5634914", "0.5634315", "0.5630891", "0.56294024", "0.56288445", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973", "0.56282973" ]
0.0
-1
Representation of the spinlock
def __repr__(self): return "Spinlock({})".format(self.stepforward)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lock_control(self):\n raise NotImplementedError('PlatformService: Implementation incomplete')", "def spinlocks(self):\n return self._spinlocks", "def lock(self):\n raise NotImplementedError", "def lock(self):\r\n return self._lock", "def f_lock(self):\n self._locked = True", "def lock(*args):", "def i_am_locking(self):\r\n pass", "def lock_blocks(self) -> int:", "def v_locked(self):\n return self._locked", "def action_lock(self):\n self.state = 'locked'", "def lock(self, item_type):", "def lock(self):\n print(\"DEPRECATED lock\")\n return self._operations.lock()", "def _lock_key(self):\n return hash_string_64bit('dirbs-listgen')", "def is_locked(self):\r\n pass", "def get_spin(self, i):\n \n return 1 if self.spins[i] else -1", "def lock(self):\n return self._lock", "def break_lock(self):\r\n pass", "def getSpinControl(*args):", "def _b_spin_changed(self):\n self.bLine.setValue(self.bSpin.value())", "def lock_object(self):\n return gevent.thread.allocate_lock()", "def pilotLock (self):\n return self.unlock()", "def lock (self):\n self.locked = True\n self._changed = False", "def lock_clock(self):\n self.sem.acquire()", "def __getattr__(self, name):\n return getattr(self._lock, name)", "def locked(self):\n\t\treturn self.__locked", "def __enter__(self):\n return self._lock.__enter__()", "def unlocked():\r\n return Lock(None)", "def read_lock_bits(self):\n self.writecmd(self.APP, self.WRITE3_READ1, 4, [0x54, 0x00, 0x00, 0x00])\n return [(ord(self.data[0]) >> x) & 1 for x in range(5)]", "def __getstate__(self):\n state = self.__dict__\n state['_lock'] = None\n return state", "def spin(self, *args, **kwargs) -> Any:\n pass", "def test_barrier_spinlocks():\n if (not test_spinlocks):\n py.test.skip(\"gcc > 4.6 is nescessary for this test\")\n parrent_test_barrier(UseSpinLock=True)", "def svn_info_t_lock_get(svn_info_t_self): # real signature unknown; restored from __doc__\n pass", "def __getitem__(self, hashlock):\n return self.locked[hashlock]", "def tarantool_lock(self):\n if self._lockinst is None:\n self._lockinst = threading.Lock()\n\n return self._lockinst", "def ToggleLock(self, event):\n pass", "def lock(self, nReserved):\n\t\treturn Job(SDK.PrlVm_Lock(self.handle, nReserved)[0])", "def rlock_object(self):\n return RLock()", "def spin(rxn_class):\n return rxn_class[1]", "def lock(self) -> TokenEKeyLock:\n return self._lock", "def spin(self) -> float:\n return self._s", "def spin(mult):\n return mult - 1", "def shared_lock(self):\n return self.lock(False)", "def event_lock_delay(self) -> int:\n raise NotImplementedError", "def get_lock_time():\n pass", "def __call__(self, resource: LockResource, timeout: timedelta) -> Lock:", "def thread_lock(self, bay_uuid):\n try:\n self.acquire()\n yield\n except exception.OperationInProgress:\n raise\n except: # noqa\n with excutils.save_and_reraise_exception():\n self.release(bay_uuid)", "def miner_lock_blocks(self) -> int:", "def lock(self):\n\n self.wait = True", "def lock(self, writelock=False, nattempts=0):\n return _image.image_lock(self, writelock, nattempts)", "def Locked(self) -> bool:", "def spin_input(self) -> None:\n self.spin_command = not self.spin_command", "def _g_spin_changed(self):\n self.gLine.setValue(self.gSpin.value())", "def lock_table(self):\n\n self.status = 'Locked'", "def get_lock():\n\n return multiprocessing.Lock()", "def _r_spin_changed(self):\n self.rLine.setValue(self.rSpin.value())", "def processLock(self):\r\n self.controller.executionLock()", "def RLock():\n import threading\n return threading.RLock()", "def 
acquire_lock(self):\n self._multistore._lock()", "def test_barrier():\n parrent_test_barrier(UseSpinLock=False)", "def locked(self):\n with self._block:\n status = repr(self).split(maxsplit=1)[0][1:]\n assert status in ('locked', 'unlocked')\n return status == 'locked'", "def get_lock(self, name, try_=False):\n lock = Lock(self, name, try_)\n with lock as got_lock:\n yield got_lock", "def __init__(self):\n self.clock = 0\n self.sem = asyncio.Semaphore(1)", "def lock(self, value=True):\n self.lock_ = value\n\n return self", "def __repr__(self) -> str:\n return repr(self._lock).replace(\n '_thread.RLock',\n '{cls.__module__}.{cls.__class__.__name__}'.format(cls=self))", "def set_lock(self, value):\n act = LockAction(self, value)\n return act.invoke()", "def _spin_sequence(self):\n self.return_position = self.motor.position\n \n for spin_num in range(self.num_spins):\n self.taxi()\n\n det_pre_acquire(self.detector)\n self.fly()\n det_post_acquire(self.detector)\n\n while self.detector.hdf1.write_file.value:\n time.sleep(0.01) # wait for file to be written\n\n event = OrderedDict()\n event[\"time\"] = time.time()\n event[\"seq_num\"] = spin_num + 1\n event[\"data\"] = {}\n event[\"timestamps\"] = {}\n for d_item in (self.detector.hdf1.full_file_name,):\n d = d_item.read()\n for k, v in d.items():\n event['data'][k] = v['value']\n event['timestamps'][k] = v['timestamp']\n\n self._data.append(event)\n\n self.set(\"Return\")\n self._completion_status._finished(success=True)", "def get_lock(name):\n return _handler_locks[name]", "def lock(self):\n self._privkey = None\n self.locked = True", "def spinAround(self):", "def dead_lock(self):\n return None", "def set_lock_time():\n\n pass", "def __init__(\n self,\n door: Door,\n **kwargs,\n ):\n\n super().__init__(ElementTypes.LOCK, **kwargs)\n\n self.door = door", "def createLock(self):\n self.lock = stdoutlock", "def __init__(self):\n self._monitor_lock = threading.Lock() # type: threading.Lock", "def lock_gate(self):\n self.fsm_gate.clear()", "def read_acquire(self):\n self.is_locked = True\n self.rwlock = RWLock().read_acquire()", "def _spinner_key():\n with _spinner_key.lock:\n _spinner_key.counter += 1\n return \"_spinner_%d\" % _spinner_key.counter", "def init(l):\n global lock\n lock = l", "def init(l):\n global lock\n lock = l", "def arm_oplock_future(self):\n self.oplock_future = self.tree.session.client.oplock_break_future(self.file_id)", "def pin_wheel(self):\n spinner = '|/-\\\\'\n sys.stdout.write('%s \\r' % spinner[self.last_spin])\n sys.stdout.flush()\n self.last_spin += 1\n if self.last_spin > len(spinner) - 1:\n self.last_spin = 0\n time.sleep(.2)", "def locked(self, value):\n if value is None:\n value = False\n elif isinstance(value, int):\n value = value == 1\n elif isinstance(value, str):\n value = value.lower() in [\"true\", \"1\"]\n assert isinstance(value, bool)\n self._locked = value", "def get_state(self):\n if not self._variable.get():\n return \"Locked\"\n\n elif self._variable.get():\n return \"Unlocked\"", "def create_lock() -> Lock:\n return Lock()", "def on_spin(self, event):\n spin_value = self.spin_run.GetValue()\n text = \"\".join([_(u\"New run spin control value: \"), str(spin_value)])\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()\n return spin_value", "def holding(self) -> float:\n return self._holding", "def __init__(self, name='KillableThread'):\n self._stopevent = threading.Event( )\n self._sleepperiod = 1.0\n threading.Thread.__init__(self, name=name)", "def 
oplocks(self):\n return self._oplocks", "def is_holding(self):\n return self.holding", "def __init__(self, *args):\n this = _ida_segment.new_lock_segment(*args)\n try: self.this.append(this)\n except: self.this = this", "def latch(self):\n return self._latch", "def lock(self):\n self.mtx.acquire()", "def _get_locked(self, mountpoint):\n # This dance is because we delete locks. We need to be sure that the\n # lock we hold does not belong to an object which has been deleted.\n # We do this by checking that mountpoint still refers to this object\n # when we hold the lock. This is safe because:\n # * we only delete an object from mountpounts whilst holding its lock\n # * mountpoints is a defaultdict which will atomically create a new\n # object on access\n while True:\n mount = self.mountpoints[mountpoint]\n with mount.lock:\n if self.mountpoints[mountpoint] is mount:\n yield mount\n break", "def set_lock_status(use_lock):\r\n get_lock.lock_is_enabled = use_lock", "def locked(self):\n return self._locked", "def locked(self):\n return self._locked", "def locked(self):\n return self._locked", "def svn_info_t_lock_set(svn_info_t_self, svn_lock_t_lock): # real signature unknown; restored from __doc__\n pass", "def is_unlocked(self):\r\n return self._lock_fd is None", "def __setstate__(self, state):\n state['_lock'] = Lock()\n self.__dict__.update(state)" ]
[ "0.6281956", "0.61268324", "0.60570246", "0.60079235", "0.5999252", "0.5991468", "0.5932551", "0.5889748", "0.57721347", "0.5762535", "0.5744118", "0.57419753", "0.5740945", "0.5726452", "0.57194465", "0.56717205", "0.56714046", "0.56600225", "0.5617723", "0.5606084", "0.5601709", "0.5601595", "0.55989456", "0.5584011", "0.55203074", "0.55102026", "0.54959446", "0.5492584", "0.548023", "0.5472547", "0.5470681", "0.54609334", "0.54471576", "0.5423764", "0.5415074", "0.5413588", "0.540975", "0.54064894", "0.5391948", "0.5391606", "0.539117", "0.5390712", "0.53801763", "0.5379115", "0.53773457", "0.5376391", "0.5346932", "0.5317493", "0.5287969", "0.52828705", "0.52821493", "0.52812886", "0.5261284", "0.52573645", "0.52527755", "0.52418697", "0.5233987", "0.5224385", "0.5202544", "0.5201621", "0.51948357", "0.519419", "0.5194176", "0.5190234", "0.51876235", "0.5178126", "0.51770616", "0.5176678", "0.51748323", "0.5173132", "0.5171837", "0.51677525", "0.51638174", "0.5160032", "0.51409173", "0.5136436", "0.51312894", "0.5120776", "0.5120776", "0.5120563", "0.51190567", "0.51164454", "0.5116351", "0.5109898", "0.5108689", "0.5106039", "0.5099724", "0.5098218", "0.50971884", "0.50935537", "0.50802326", "0.5079454", "0.50783426", "0.5076173", "0.50758314", "0.50758314", "0.50758314", "0.5054972", "0.50520813", "0.5047546" ]
0.71650106
0
Move n steps then insert a new value
def process(self, count):
    self.data.add_node(0)
    for index in range(1, count + 1):
        # print("{}.: {}".format(index, self.data))
        self.data.move_circular(self.stepforward)
        self.data.add_node(index)
    return self.data.get_next()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def increment_step(self, n_steps: int) -> None:\n self.step = self.policy.increment_step(n_steps)", "def increment_step(self, n_steps: int) -> None:\n self.step = self.policy.increment_step(n_steps)", "def increment_steps(self):\n self.num_steps += 1", "def make_step(self):\n self.step_vals = np.cumsum(self.vals)", "def move(self, step):\n for point in self.points:\n l = min(len(step), len(point.position))\n for i in range(l):\n point.position[i] = step[i]", "def _increment_move(move, direction, n):\n # print(move)\n move = list(map(sum, zip(move, direction)))\n #move = (move[0]+direction[0], move[1]+direction[1])\n while all(map(lambda x: 0 <= x < n, move)):\n #while 0<=move[0] and move[0]<n and 0<=move[1] and move[1]<n:\n yield move\n move=list(map(sum,zip(move,direction)))\n #move = (move[0]+direction[0],move[1]+direction[1])", "def step(self, n, dlist):\n pass", "def _increment_move(move, direction, n):\n # print(move)\n move = list(map(sum, zip(move, direction)))\n #move = (move[0]+direction[0], move[1]+direction[1])\n while all(map(lambda x: 0 <= x < n, move)): \n #while 0<=move[0] and move[0]<n and 0<=move[1] and move[1]<n:\n yield move\n move=list(map(sum,zip(move,direction)))\n #move = (move[0]+direction[0],move[1]+direction[1])", "def step(self, move):", "def insertTraversal(self, value):\n if random.random() <= 0.5:\n self.__insertTraversalLeft(value)\n else: \n self.__insertTraversalRight(value)", "def move_step(self, move):\n # Check that the move is valid\n steps = self.mgr.obj.steps\n if len(steps) == 0:\n return\n idx = self.stepsListWidget.currentRow()\n idx_max = len(steps) - 1\n if (idx+move < 0) or (idx+move > idx_max):\n return\n \n # Insert the step at its new location, then delete it at the old location\n steps.insert(idx+move+(move>0), steps[idx])\n del steps[idx if move>0 else idx+1]\n \n self.load_steps()\n self.stepsListWidget.setCurrentRow(idx+move)\n self.mgr.changed = True", "def move_stack(n, start, end):\n assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, \"Bad start/end\"\n \n if n == 1:\n print_move(start, end) \n else:\n extra_pole = 6 - start - end\n move_stack(n-1, start, extra_pole)\n move_stack(1, start, end)\n move_stack(n-1, extra_pole, end)\n\n #GSI helped, still don't completely understand, need to review more", "def step(self, move):\r\n self.board.push_uci(move)\r\n self.num_halfmoves += 1", "def _shift_up(self, idx):\n\n parent = (idx - 1) // 2\n while parent >= 0 and self.value(parent) < self.value(idx):\n self.items[parent], self.items[idx] = self.items[idx], self.items[parent]\n idx = parent\n parent = (idx - 1) // 2", "def steps(self, step_count):\n self.dir.value(0 if step_count > 0 else 1)\n for i in range(abs(step_count)):\n self.stp.value(1)\n sleep_us(self.step_time)\n self.stp.value(0)\n sleep_us(self.step_time)\n self.current_position += step_count", "def insert(self, value):\n tower = [None] * self.max_levels\n steps_at_level = [0] * self.max_levels\n node = self.head\n for level in reversed(range(self.max_levels)):\n while node.next[level].value <= value:\n steps_at_level[level] += node.width[level]\n node = node.next[level]\n tower[level] = node\n d = min(self.max_levels, 1 - int(math.log(random(), 2.0)))\n node = self.Node(value, [None] * d, [None] * d)\n steps = 0\n for level in range(d):\n prev = tower[level]\n node.next[level], prev.next[level] = prev.next[level], node\n node.width[level] = prev.width[level] - steps\n prev.width[level] = steps + 1\n steps += steps_at_level[level]\n for level in range(d, 
self.max_levels):\n tower[level].width[level] += 1\n self.size += 1", "def move_back(t,n):\n lt(t)\n bk(t, n)\n rt(t)", "def make_times_with_n_step(self, start, end, n):\n self.target_times = []\n step = start\n delta = old_div((end - start), float(n))\n while step <= end:\n self.target_times.append(step)\n step += delta", "def insert(self, index, value):\n if self.head is None:\n self.append(value)\n return\n \n from_head = True if index >= 0 else False \n if from_head: \n node = self.head\n steps = index \n else:\n node = self.tail \n steps = abs(index) -1 \n while steps > 0 and node is not None:\n node = node.next_node if from_head else node.prev_node \n steps -= 1 \n \n if node is None:\n if from_head: \n self.append(value)\n return\n else:\n self.push_front(value)\n return\n if node is self.head:\n self.push_front(value)\n return\n else:\n new_node = DLLNode(value)\n new_node.next_node = node\n new_node.prev_node = node.prev_node\n node.prev_node.next_node = new_node\n node.prev_node = new_node \n return", "def DELAY(A, n):\r\n At = pivot_table(A)\r\n res = At.shift(n)\r\n res = stack_table(res)\r\n return res", "def move_by(self, increment):\n return self.move_to(self.position + increment)", "def move(self): # AH note. Swich move with extra_steps?\n if self.adjustment < 0:\n self.position += self.extra_steps\n super().move()\n self.no_moves += 1\n # Do the regular move", "def _move(self, direction, count, step_size):\n for _ in range(count):\n if not self.has_hit_edge():\n self.position = min(self.scene.step_count - 1, max(0,\n self.position + direction * step_size))\n self._take_measure()\n self.steps_taken += 1\n self.visited_positions.append(self.position)", "def _roll(self):\n order = np.array(self.order)\n nsteps = np.array(self.nsteps)\n order[nsteps > 1] = np.roll(order[nsteps > 1], 1)\n self.order = order.tolist()", "def InsertNextPoint(self, ):\n ...", "def ad_step_to_progress_bar(self, n):\r\n self.progress_step += n\r\n self.progress[\"value\"] = self.progress_step\r\n self.progress.update_idletasks()", "def advance_rollout(self, buffer_index: int = 0):\n self._cur_step_idxs += self._last_should_inserts", "def insert_after(self, request, current_step, step):\n steps = self.get_steps(request)\n\n if step not in steps:\n index = steps.index(current_step) + 1\n steps.insert(index, step)", "def move_stack(n, start, end):\n assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, \"Bad start/end\"\n \"*** YOUR CODE HERE ***\"\n if n == 1:\n print_move(start, end)\n return\n handle = 6 - start - end\n move_stack(n - 1, start, handle)\n print_move(start, end)\n move_stack(n - 1, handle, end)\n return", "def up_to(self, n, l):\n for i in range(self._, n):\n l(i)\n return self", "def move(self, n: int) -> \"Linked[T]\":\n out = self\n if n >= 0:\n for _ in range(n):\n out = out.forward\n else:\n for _ in range(-n):\n out = out.backward\n return out", "def step(self, steps):\n self._simulate(endStep=self.currentStep+steps)", "def move_element(self,n_a,n_b):\n self.element_array.insert(n_b,self.element_array.pop(n_a))", "def _advance(self):\n self._prev, self._current = self._current, self._prev + self._current", "def move(degs, i, j, n):\n if n > 0:\n temp = 3 - i - j\n move(degs, i, temp, n - 1) \n degs[j].append(degs[i].pop(-1))\n print(degs)\n move(degs, temp, j, n - 1)", "def insert(self, i, x) -> None:\n pass", "def fill_step(self):\n while len(self.x_values) < self.num_points\n x_step = self.get_step()\n y_step = self.get_step()\n if x_step == 0 and y_step == 0:\n continue\n 
next_x = self.x_values[-1] x x_step\n next_y = self.y_values[-1] + y_step\n self.x_values.append(next_x)\n self.y_values.append(next_y)", "def insert(self, n):\n # The distance from the ith cell to the jth probe.\n dij = n.XY.reshape((2,-1,1)) - self.points.reshape((2,1,-1))\n dij = (dij**2).sum(axis=0) / self.radius\n dij[dij < 1] = 1\n self.M = 1 / dij\n self.n = n", "def move_stack(n, start, end):\n assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, \"Bad start/end\"\n \"*** YOUR CODE HERE ***\"", "def move_stack(n, start, end):\n assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, \"Bad start/end\"\n \"*** YOUR CODE HERE ***\"", "def insert(self, idx, value):\n assert(isinstance(idx, int))\n nidx = self._normalize_idx(idx)\n self.data.append(None)\n for i in range(len(self.data)-1,idx,-1):\n self.data[i] = self.data[i-1]\n self.data[idx] = value", "def step(self):\n \n if not self.instant_grow_back:\n self.amount = min([self.max_sugar, self.amount + 1])\n else:\n self.amount = self.max_sugar", "def turn_steps(self, steps, delay_ms=1):\n if steps < 0:\n direction = -1\n else:\n direction = 1\n for _ in range(abs(int(steps))):\n self.current_step += direction\n element = STEP_ELEMENTS[self.current_step % N_STEP_ELEMENTS ]\n self.set_bits(element)\n time.sleep_ms(delay_ms)", "def simulate(self, n, dt=None):\n for _ in range(n):\n self.step(dt)", "def incr_max_step( bgn , end , stepSize ):\n numSteps = (end - bgn) / stepSize\n rtnLst = [ bgn + i * stepSize for i in xrange( trunc(numSteps) + 1 ) ]\n if numSteps % 1 > 0:\n rtnLst.append( end )\n return rtnLst", "def skip_add(n):\n \n\n\n\n if n ==0:\n return 0\n if n ==1:\n return 1\n else:\n return n + skip_add(n-2)", "def nextStep(self):\n next_step = self.current_step + 1\n if next_step > self.total_steps:\n return\n\n self.current_chunk_size = next_step - sum(self.chunk_percentage)\n self.current_step = next_step", "def shift(a, n=1):\n return a[n:] + a[:n]", "def move_by(cls, value):\n cls.set_position(cls._position + value)", "def steps(data: typing.List[int], stranger: bool = False) -> int:\n data = data.copy()\n ptr = 0\n steps = 0\n while 0 <= ptr < len(data):\n offset = data[ptr]\n if stranger and offset >= 3:\n data[ptr] -= 1\n else:\n data[ptr] += 1\n ptr += offset\n steps += 1\n return steps", "def up(self, i):\n x = self.heap[i]\n while i > 1 and x < self.heap[i // 2]:\n self.heap[i] = self.heap[i // 2]\n self.rank[self.heap[i // 2]] = i\n i //= 2\n self.heap[i] = x # insertion index found\n self.rank[x] = i", "def up(self, i):\n x = self.heap[i]\n while i > 1 and x < self.heap[i // 2]:\n self.heap[i] = self.heap[i // 2]\n self.rank[self.heap[i // 2]] = i\n i //= 2\n self.heap[i] = x # insertion index found\n self.rank[x] = i", "def __call__(self, new_val, previous_val, step):\n\t\treturn", "def move(self, head, steps):\n self.turn(head)\n if self.direction == 0:\n self.x += int(steps)\n if self.direction == 1:\n self.y += int(steps)\n if self.direction == 2:\n self.x -= int(steps)\n if self.direction == 3:\n self.y -= int(steps)", "def _insert_action_after(self, idx, name, value):\n i = list(self.pipeline.index).index(idx)\n part1 = self.pipeline[0 : i + 1]\n new_item = pd.Series([value], index=[name])\n part2 = self.pipeline[i + 1 :]\n self.pipeline = pd.concat([part1, new_item, part2])", "def prepend_nb(a, n, value):\n out = np.empty((a.shape[0] + n, a.shape[1]), dtype=np.float_)\n out[:n, :] = value\n out[n:, :] = a\n return out", "def _step_snell(self) -> None:\n self.snell.step()", "def 
backtrack_steps():\n\n # Initialize position and number of steps\n x = 0\n n_steps = 0\n\n # Walk until we get to positive 1\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n\n return n_steps", "def _step(\n self,\n states: np.ndarray,\n actions: List[np.ndarray],\n rewards: np.ndarray,\n next_states: np.ndarray,\n dones: np.ndarray,\n ) -> None:\n self.memory.add(states, np.concatenate(actions), rewards, next_states, dones)\n self.step_count += 1\n\n if (\n len(self.memory) > self.batch_size\n and (self.step_count % self.update_every) == 0\n ):\n self._optimize()", "def advance_n(self,n):\n print(self)\n for i in range(n):\n self.advance_one()\n print(self)", "def move_items(self):\n self.set_fibonnaci_levels()", "def incr(n=1):\n for i in xrange(n):\n pulse_hi(INCR)", "def test_shift_sets_new_tail_is_previous(new_dll):\n new_dll.shift()\n assert new_dll.tail.value == 4", "def first_order_posint(self, timestep):\n self.prev_pos = self.position\n self.position = self.position + (self.velocity * timestep)", "def insert_step(self, key, step, after, before=None):\n\n step = self._steps.update(key, step)\n step.set_parent(self)\n\n if after == \"$prev\" and len(self._steps) == 1:\n after = None\n\n previous = \"\"\n if after:\n if after == \"$prev\" and self._last_added:\n previous = self._last_added.name\n else:\n if after not in self._steps.keys():\n raise MLRunInvalidArgumentError(\n f\"cant set after, there is no step named {after}\"\n )\n previous = after\n step.after_step(previous)\n\n if before:\n if before not in self._steps.keys():\n raise MLRunInvalidArgumentError(\n f\"cant set before, there is no step named {before}\"\n )\n if before == step.name or before == previous:\n raise GraphError(\n f\"graph loop, step {before} is specified in before and/or after {key}\"\n )\n self[before].after_step(step.name)\n self._last_added = step\n return step", "def step(self):\n all_p = self.amount\n neighbors = self.model.grid.get_neighbors(self.pos, True)\n for n in neighbors:\n all_p += n.amount\n ave_p = all_p / (len(neighbors) + 1)\n\n self._nextAmount = (1 - self.model.evaporate) * \\\n (self.amount + (self.model.diffusion * \\\n (ave_p - self.amount)))\n\n if self._nextAmount < self.model.lowerbound:\n self._nextAmount = 0", "def move(t, length):\n pu(t)\n\t\n fd(t, length)\n pd(t)", "def addNodeAfter(self, new_value, after__node): # Class O(n)\r\n if not isinstance(new_value, Node):\r\n if new_value % 1 != 0: raise ValueError(\"Please, insert an integer\")\r\n if after__node > self.length(): raise ValueError(\"Invalid position\")\r\n count = 1\r\n h = self.head\r\n while count != after__node:\r\n h = h.next\r\n count += 1\r\n move_after = h.next\r\n h.next = Node(new_value)\r\n h.next.next = move_after", "def insert(self, pos, length):\n if pos in self.insertions:\n self.insertions[pos] += length\n else:\n self.insertions[pos] = length", "def insertion_optimized(array):\n for i,val in enumerate(array):\n while i>0 and val<array[i-1]:\n array[i] = array[i-1]\n i -= 1\n array[i] = val", "def vec_shift_left_n(x, n):\n return jnp.zeros_like(x).at[0:-n].set(x[n:])", "def tsp_walk(n, op, nsteps):\n result = []\n t = list(range(n))\n result.append(tuple(t))\n for i in range(nsteps):\n t = op(t)\n result.append(tuple(t))\n return result", "def fill_walk(self):\n\n # keep walking until walk reaches desired length\n while len(self.x_values) < self.num_points:\n\n x_step = self.get_step()\n y_step = self.get_step()\n\n #rejects streps that do nowhere\n if y_step == 0 and x_step 
==0:\n continue\n\n #calculate the next x and y values for the list and add it [-1] gets the last item in a list\n next_x = self.x_values[-1] + x_step\n next_y = self.y_values[-1] + y_step\n\n self.x_values.append(next_x)\n self.y_values.append(next_y)", "def sow_step(self, player, move):\n init_pit = move\n stones = self.p_pits(player.index)[init_pit]\n clen = 2 * self.M + 1\n\n if player.index == 1:\n cstate = self.state[:-1]\n else:\n cstate = self.p2_pits() + [self.p2_store()] + self.p1_pits()\n\n per_add = stones // clen\n dis_pit = stones % clen\n\n cstate[init_pit] = 0\n last_pit = (init_pit + dis_pit) % clen\n new_state = [i + per_add for i in cstate]\n if last_pit > init_pit:\n new_state = [\n v + 1 if init_pit < i <= last_pit else v\n for i, v in enumerate(new_state)\n ]\n elif last_pit < init_pit:\n new_state = [\n v + 1 if (init_pit < i or i <= last_pit) else v\n for i, v in enumerate(new_state)\n ]\n else:\n pass\n\n if player.index == 1:\n return new_state + [self.p2_store()], last_pit\n else:\n return new_state[-self.M:] + [self.p1_store()\n ] + new_state[:-self.M], last_pit", "def step(self):\n value = self.current_event[\"step\"][\"value\"]\n self.current_value.append(value)", "def insertnln(n=1):\r\n\tidx = 0\r\n\twhile idx < n:\r\n\t\tCONSOLE.insertln()\r\n\t\tidx = idx + 1", "def testInsert(self):\n\n for i in xrange(randint(50,150)):\n self.s.insert(i, None)", "def _step(self) -> None:", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n vals = [1, 2, 3]\n if n <= 3:\n return vals[n-1]\n for i in range(n - 3):\n new_val = 3 * vals[0] + 2 * vals[1] + 1 * vals[2]\n vals = vals[1:] + [new_val]\n return vals[-1]", "def move_next(self, step=1):\n if self._index is not None and len(self) > self._index + step:\n self._index += step\n # if index >= end index of current frame --> recalculate findex\n if self._index >= self._findex * self._flen + self._flen:\n self._findex += int(math.ceil(step / float(self._flen)))\n return self[self._index]\n return None", "def move(self, amount):\n self.__validate_index(amount)\n for i in range(amount):\n self.__list = self.__list[1:] + [self.__list[0]]\n return self.__list", "def step(self, n=1, debug=True):\n for i in range(n):\n # Don't run if in sigterm state\n if self.state == self.sigterm:\n break\n\n if debug:\n print(self)\n\n # read next instruction\n instruction_key = (self.tape[self.index], self.state)\n # update\n self.tape[self.index], step, self.state = self.program[instruction_key]\n # move head\n self.index += 1 if step == '>' else -1", "def jumpahead(self, n):\n self.counter += n\n self.basehash.update(b'\\x00'*n)", "def _maybe_add(self, chunks, i, direction):\n target_chunk = chunks[i]\n if direction == \"next\":\n source_chunk = chunks[i+1]\n path_index = 0\n move_func = lambda: target_chunk.append(source_chunk.popleft())\n elif direction == \"prev\":\n source_chunk = chunks[i-1]\n path_index = -1\n move_func = lambda: target_chunk.appendleft(source_chunk.pop())\n else:\n raise ValueError(\"Unexpected move direction %s\" % direction)\n\n return self._maybe_move(source_chunk, target_chunk, path_index, move_func)", "def stump(t, n, angle=90):\n lt(t)\n fd(t, n)\n rt(t, angle)", "def move(self, *step):\n self.x += step[0]\n self.y += step[1]", "def increment(self):\r\n self.add_output()\r\n for i in range(self.length-1, 0, -1):\r\n self.slots[i].item = self.slots[i-1].item\r\n self.slots[0].item = Item.random()", "def move_up ( self ):\n list, index = self.get_info()\n self.value = (list[:index-1] + [ list[index], list[index-1] ] 
+ \n list[index+1:])", "def _advance(self):\n self._current += 1", "def incr_min_step( bgn , end , stepSize ):\n # NOTE: The actual step size will be the size that produces an evenly-spaced list of trunc( (end - bgn) / stepSize ) elements\n return np.linspace( bgn , end , num = trunc( (end - bgn) / stepSize ) , endpoint=True )", "def insert(self, e):\n try:\n self.vals[e] += 1\n except:\n self.vals[e] = 1", "def step_forward(self):", "def NewStartingIndex(self) -> int:", "def _insert_action_before(self, idx, name, value):\n i = list(self.pipeline.index).index(idx)\n part1 = self.pipeline[0:i]\n new_item = pd.Series([value], index=[name])\n part2 = self.pipeline[i:]\n self.pipeline = pd.concat([part1, new_item, part2])", "def insert(self, index, value):\n # check the validity of index\n if index < 0 or index > self.n: # larger than no. of items\n print(\"Index Error; please input valid index\")\n return\n # if index==0, same as push_front\n if index==0:\n self.push_front(value)\n return\n # else,\n new_node = Node(value)\n temp_node = self.head\n for _ in range(index-1):\n temp_node = temp_node.next # traverse the list\n new_node.next = temp_node.next # temp_node is index-1 node\n temp_node.next = new_node\n self.n += 1", "def move(self, config):\n if config['next_move'] == 0:\n config['stack'].append(config['buffer'].pop(0))\n elif config['next_move'] == 1:\n config['pred_tree'][config['stack'][-2]] = config['stack'][-1]\n del config['stack'][-2]\n elif config['next_move'] == 2:\n config['pred_tree'][config['stack'][-1]] = config['stack'][-2]\n del config['stack'][-1]\n elif config['next_move'] == 3:\n config['buffer'].insert(0, config['stack'].pop(-2))\n return config", "def _advance(self):\n self._prev, self._current = self._current, abs(self._prev - self._current)", "def nextNumberOfResults(self, N=10):\n self.start += self.N\n self.N = N", "def __step(self, p):\n action = self.__action(p)\n temp_state = self.state\n\n if self.state == 0:\n if action == 1:\n self.state += 1\n elif self.state == 1:\n if action == 1:\n self.state -= 1\n else:\n self.state += 1\n else:\n if action == 1:\n self.state += 1\n else:\n self.state -= 1\n \n self.trajectory.append([temp_state, action, self.__reward(self.state)])", "def test_move_dropped_steps_greater_than_move(self):\n player = ss.LazyPlayer(dropped_steps=3)\n random.seed(2)\n player.move()\n random.seed(2)\n player.move()\n assert player.position == 40" ]
[ "0.6520088", "0.6520088", "0.61909217", "0.6150601", "0.61039525", "0.6089643", "0.60866725", "0.6062686", "0.6013688", "0.59797156", "0.59569323", "0.58549404", "0.58222437", "0.5809769", "0.5773611", "0.5759914", "0.57056457", "0.56964505", "0.5682795", "0.5681156", "0.56596935", "0.56340027", "0.56232613", "0.5599977", "0.5593141", "0.55901855", "0.55773175", "0.5576082", "0.55697036", "0.555561", "0.5544915", "0.5539345", "0.5528436", "0.5521636", "0.55076975", "0.5502451", "0.5498891", "0.5485389", "0.54725474", "0.54725474", "0.5465003", "0.5435032", "0.5429472", "0.5422976", "0.54085124", "0.54065937", "0.5393952", "0.53932714", "0.53908813", "0.53729093", "0.5362841", "0.5362841", "0.535814", "0.5357614", "0.53544503", "0.5348074", "0.53295255", "0.5325061", "0.53242767", "0.532375", "0.5310757", "0.5308691", "0.5308348", "0.5308275", "0.530661", "0.5303774", "0.5296112", "0.52958655", "0.5295503", "0.52910674", "0.5285481", "0.52851325", "0.52812064", "0.5281015", "0.5279151", "0.5273241", "0.5272312", "0.5268466", "0.5266004", "0.52604556", "0.5256092", "0.52547777", "0.5250042", "0.52495474", "0.524883", "0.52485275", "0.524577", "0.5243463", "0.52429974", "0.52400535", "0.5235289", "0.5233779", "0.5219878", "0.5219372", "0.52186096", "0.521583", "0.52110726", "0.5209544", "0.52081597", "0.5204105" ]
0.5482471
38
Solution to the problem
def solution(data):
    lock = Spinlock(data)
    return lock.process(2017)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve(self):", "def solve(self):\n pass", "def solve(self):\n pass", "def solve(self):\n ...", "def problem_298():\n pass", "def solvate(self):\n\n pass", "def solution(s):", "def get_sol(self):", "def test_get_solution(self):\n pass", "def solution(self) -> State:", "def exercise_b2_106():\r\n pass", "def _optimise(self):\n pass", "def solve(self):\n raise NotImplementedError(\"This method needs to be implemented.\")", "def solution(self):\n return [(\"the\", 1579644)] * 100", "def exercise_b2_113():\r\n pass", "def exercise_b2_107():\r\n pass", "def exercise_b2_53():\r\n pass", "def task4_1(self):\n\n pass", "def exercise_b2_52():\r\n pass", "def solveOneStep(self):\n ### Student code goes here\n return True", "def exo2():", "def exercise_b2_70():\r\n pass", "def exercise_b2_82():\r\n pass", "def exercise():\n pi_good = get_pdb_inputs(pdb_str=pdb_str_answer, restraints=False)\n map_data = get_map(xrs=pi_good.xrs)\n xrs_good = pi_good.xrs.deep_copy_scatterers()\n pi_good.ph.write_pdb_file(file_name=\"answer.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())\n #\n pi_poor = get_pdb_inputs(pdb_str=pdb_str_poor, restraints=True)\n pi_poor.ph.write_pdb_file(file_name=\"poor.pdb\")\n xrs_poor = pi_poor.xrs.deep_copy_scatterers()\n #\n d = xrs_good.distances(other=xrs_poor)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)>2\n assert flex.mean(d)>0.7\n #\n xrs_refined = xrs_poor\n for i in range(3):\n ero = individual_sites.easy(\n map_data = map_data,\n xray_structure = xrs_refined,\n pdb_hierarchy = pi_poor.ph,\n geometry_restraints_manager = pi_poor.grm)\n xrs_refined = ero.xray_structure\n # comapre\n d = xrs_good.distances(other=xrs_refined)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)<0.15\n assert flex.mean(d)<0.03\n ero.pdb_hierarchy.write_pdb_file(file_name=\"refined.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())", "def exercise_b2_69():\r\n pass", "def solve(self):\n \n raise NotImplementedError(\"not implemented!\")", "def problem_1b():\n # BEGIN_YOUR_ANSWER (our solution is 1 lines of code, but don't worry if you deviate from this)\n return 4\n # END_YOUR_ANSWER", "def apply(self) -> None:", "def apply(self) -> None:", "def print_solution():\n pass", "def evaluate(self):\n #fac o lista cu toate perechile si vad daca se repeta vreuna (pana acum)\n nr=0\n \n pairs = []\n for i in range(0,self.__size):\n for j in range(0, self.__size):\n if self.__solution[i] != [] and self.__solution[i+self.__size] != [] : #sa am de unde face perechea\n p=[]\n p.append(self.__solution[i][j])\n p.append(self.__solution[i+self.__size][j])\n pairs.append(p)\n for p in pairs:\n if pairs.count(p) == 1:\n nr += 1\n\n return self.__size*self.__size - nr + 1 # pun acel +1 ca sa nu fie 0 niciodata -> ca sa nu am probleme la impartire\n # la 0 mai incolo\n #return nr", "def exercise_b2_27():\r\n pass", "def solution(self):\n return [(\"the\", 1561900)] * 100", "def apply(self):", "def task4(self):\n\n pass", "def exercise_b2_98():\r\n pass", "def solution(data):\n\t\tif data:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def exercise_b2_26():\r\n pass", "def solution(self):\n return [(\"simple 1\", 1.),\n (\"simple 2\", 1.),\n (\"simple 3\", 1.),\n (\"simple 4\", 1.),\n 
(\"simple 5\", 1.),\n (\"simple 10\", 1.),\n (\"simple 15\", 1.),\n (\"thai 1\", 1.),\n (\"thai 2\", 1.),\n (\"thai 3\", 1.),\n (\"thai 4\", 1.),\n (\"thai 5\", 1.),\n (\"thai 10\", 1.),\n (\"thai 15\", 1.),\n ]", "def exercise_b2_95():\r\n pass", "def _perturbInPlaceHard(self):\n die", "def compute_debug(self):", "def exercise_b2_93():\r\n pass", "def fix_bug(self):\n self.bugged = False\n self.error_prob = 0.0", "def exercise_b2_43():\r\n pass", "def invalidproblem(currency_from, currency_to, amount_from):\n x = analysis(currency_from, currency_to, amount_from)\n return(oput(x))", "def exercise_b2_39():\r\n pass", "def exercise_b2_56():\r\n pass", "def violated(self) -> bool:\n ...", "def main():\n problem1()\n return 0", "def mezclar_bolsa(self):", "def calculate_output(self):", "def algorithm_loop(self):", "def _solve(self) -> CasADiArrayType:\n pass", "def question_6():\n return None", "def check():", "def question_7():\n return None", "def Problem10():\n return 'When yield strength in tension and compression are not equal'", "def find_solution(self):\n print(\"\\nFinding ICTS Solution...\")\n ######### Fill in the ICTS Algorithm here #########\n result = self.stat_tracker.time(\"time\", lambda: self.bfs())\n if result == -1:\n self.stat_tracker.stats['time'] = -1\n return []\n self.stat_tracker.write_stats_to_file(self.stat_tracker.get_results_file_name())\n return result\n ###################################################", "def pingjiazhibiao(result):\n import math\n list_ed_normal = []\n list_es_normal = []\n list_ed_true = []\n list_es_true = []\n # these definations are for statistic\n ed_pred_all, es_pred_all,ed_true_all,es_true_all,ed_match,es_match,ed_normal,es_normal,ed_nomiss,es_nomiss= 0,0,0,0,0,0,0,0,0,0\n total_error_ed,total_error_es = 0,0\n sample_missimg_num = 0\n a4cdDict = {}\n a4csDict = {}\n for i in range(-5,7):\n a4cdDict[i] = 0\n a4csDict[i] = 0\n for i in result:\n pred = i[0]\n ed_pred = pred[0]\n es_pred = pred[1]\n if ed_pred == [] or es_pred == []:\n sample_missimg_num += 1\n true = i[1]\n ed_true = true[0]\n es_true = true[1]\n\n # avoid many to one\n ed_pred.sort()\n es_pred.sort()\n deleteAmong10frames(ed_pred)\n deleteAmong10frames(es_pred)\n \n for j in ed_pred:\n ed_pred_all += 1\n for t in ed_true:\n if math.fabs(j - t) < 6:\n ed_normal += 1\n total_error_ed += math.fabs(t - j)\n a4cdDict[j-t]+=1\n break\n # all - normal = FP\n # normal is TP\n a4cdDict[6] = ed_pred_all-ed_normal\n\n for j in es_pred:\n es_pred_all += 1\n for t in es_true:\n if math.fabs(j - t) < 6:\n es_normal += 1\n total_error_es += math.fabs(t - j)\n a4csDict[j-t]+=1\n break\n a4csDict[6] = es_pred_all-es_normal\n for j in ed_true:\n ed_true_all += 1\n for t in ed_pred:\n if math.fabs(t - j) < 6:\n ed_nomiss += 1\n break\n\n for j in es_true:\n es_true_all += 1\n for t in es_pred:\n if math.fabs(t - j) < 6:\n es_nomiss += 1\n break\n # aFD precision recall \n ed_result = total_error_ed / ed_normal,(ed_normal / ed_pred_all),(ed_nomiss / ed_true_all)\n es_result = total_error_es / es_normal,(es_normal / es_pred_all),(es_nomiss / es_true_all)\n return ed_result,a4cdDict, es_result,a4csDict, sample_missimg_num / len(result)", "def question_11():\n return None", "def main():\n\n # first lets test with a already created csp:\n csp = create_map_csp()\n solution = backtracking(csp)\n #solution2,assigned = minimum_remaining_values(csp)\n print(solution)\n #print assigned\n\n # and now with our own generated sudoku CSP\n \"\"\"sudokus = read_sudokus()\n csp = 
create_sudoku_csp(sudokus[1])\n solution = backtracking(csp)\n print sudoku_csp_to_array(solution)\n\"\"\"", "def solve(self):\n if not self.solvable:\n print('Suduko not Solvable')\n return False\n res=self.back(0, 0)\n # if self.a[0][0]!=0:\n # res=self.back(0, 1)\n # else:\n # for i in range(1, 10):\n # self.a[0][0]=i\n # res=self.back(0, 1)\n # if res:\n # break\n if res:\n self.check_if_solvable()\n print(\"Sudoku Solved!\")\n print(self.a)\n return self.a\n else: print(\"Not Solvable\")\n return False", "def solve(self) -> jnp.ndarray:\n pass", "def solve_puzzle(self):\r\n \r\n counter = 0\r\n rows = self._height-1\r\n cols = self._width-1\r\n # print rows, cols\r\n # print 'The greed has %s rows and %s coloumn indexes' %(rows, cols) \r\n solution_move = ''\r\n if self.get_number(0,0) == 0 and \\\r\n self.get_number(0,1) == 1:\r\n # print 'Congrads Puxxle is Aolved at start!!!!!'\r\n return ''\r\n #appropriate_number = (self._height * self._width) - 1\r\n appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'First appropriate_number=',appropriate_number\r\n # print \"Grid first tile that we will solwing has value =\", self._grid[rows][cols]\r\n \r\n while counter < 300:\r\n counter +=1\r\n # print self\r\n #appropriate_number = (rows+1) * (cols+1) -1\r\n # print 'Appropriate number in loop=',appropriate_number\r\n # print 'We are solving %s index_row and %s index_col' %(rows, cols) \r\n ####Case when we use solve_interior_tile\r\n if rows > 1 and cols > 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n cols -= 1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solving interior tile', (rows, cols)\r\n solution_move += self.solve_interior_tile(rows, cols)\r\n # print 'Solution move=', solution_move\r\n cols -= 1\r\n #### Case when we use solve_col0_tile\r\n elif rows > 1 and cols == 0:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n cols = self._width-1\r\n appropriate_number -=1\r\n else:\r\n # print 'We are solwing tile 0 in row', rows\r\n # print 'Appropriate number here ='\r\n solution_move += self.solve_col0_tile(rows)\r\n # print 'Solution move=', solution_move\r\n rows -=1\r\n cols = self._width-1\r\n\r\n\r\n #### Cases when we use solve_row0_tile\r\n elif rows == 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows -= 1\r\n #cols = self._width-1\r\n appropriate_number -= self._width\r\n\r\n else:\r\n # print 'Solving upper 2 rows right side'\r\n solution_move += self.solve_row1_tile(cols)\r\n rows -=1\r\n appropriate_number -= self._width\r\n #### Cases when we use solve_row1_tile \r\n if rows < 1 and cols > 1:\r\n if self._grid[rows][cols] == appropriate_number:\r\n # print 'This tile is already solved!!!'\r\n rows += 1\r\n cols -= 1\r\n appropriate_number +=self._width-1\r\n else:\r\n # print '(1,J) tile solved, lets solwe tile (0,j) in tile',(rows,cols)\r\n # print 'Greed after move solve_row1_tile'\r\n # print self\r\n solution_move += self.solve_row0_tile(cols)\r\n rows +=1\r\n cols -=1\r\n appropriate_number +=self._width-1\r\n\r\n\r\n #### Case when we use solve_2x2\r\n elif rows <= 1 and cols <= 1:\r\n # print 'We are solving 2x2 puzzle'\r\n solution_move += self.solve_2x2()\r\n if self._grid[0][0] == 0 and \\\r\n self._grid[0][1] == 1:\r\n # print 'Congrads Puxxle is SOLVED!!!!!'\r\n break\r\n\r\n\r\n\r\n\r\n if counter > 100:\r\n # print 'COUNTER BREAK'\r\n break\r\n 
# print solution_move, len(solution_move)\r\n return solution_move\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for row in solution_greed._grid[::-1]:\r\n # print solution_greed._grid\r\n # print 'Row =',row\r\n \r\n # if solution_greed._grid.index(row) > 1:\r\n # print \"Case when we solwing Interior and Tile0 part\"\r\n \r\n\r\n # for col in solution_greed._grid[solution_greed._grid.index(row)][::-1]:\r\n # print 'Coloumn value=', col\r\n #print row[0]\r\n # if col !=row[0]:\r\n # print 'Case when we use just Interior tile solution'\r\n # print solution_greed._grid.index(row)\r\n # print row.index(col)\r\n \r\n # solution += solution_greed.solve_interior_tile(solution_greed._grid.index(row) , row.index(col))\r\n # print 'Solution =', solution\r\n # print self \r\n # print solution_greed._grid\r\n # elif col ==row[0]:\r\n # print 'Case when we use just Col0 solution'\r\n\r\n # else:\r\n # print 'Case when we solwing first two rows'\r\n\r\n #return \"\"\r", "def question_4():\n return None", "def test_schwefel222(self):\n fun = get_problem('schwefel222', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def calibration(self) -> int:", "def question_9():\n return None", "def uniformCostSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\r\n\r\n\tutil.raiseNotDefined()", "def task5(self):\n\n pass", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def answer(self) -> bool:", "def test_schwefel221(self):\n fun = get_problem('schwefel221', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def problem():\n for a in range(1, 380):\n for b in range(a):\n if a + b + (a**2 + b**2)**0.5 == 1000:\n return int(a * b * (a**2 + b**2)**0.5)", "def substantiate():", "def q1(puzzle):\n mysudoku = build_csp(puzzle)\n solution = mysudoku.backtracking_search()\n return solution, mysudoku", "def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, 
new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]", "def result(self):", "def result(self):", "def problem_1a():\n # BEGIN_YOUR_ANSWER (our solution is 1 lines of code, but don't worry if you deviate from this)\n return 2/5\n # END_YOUR_ANSWER", "def _calc_corrections(self): \n searchIter= self._niter-1\n while searchIter > 0:\n trySavefilename= self._createSavefilename(searchIter)\n if os.path.exists(trySavefilename):\n trySavefile= open(trySavefilename,'rb')\n corrections= sc.array(pickle.load(trySavefile))\n trySavefile.close()\n break\n else:\n searchIter-= 1\n if searchIter == 0:\n corrections= sc.ones((self._npoints,2))\n for ii in range(searchIter,self._niter):\n if ii == 0:\n currentDF= self._dftype(surfaceSigma=self._surfaceSigmaProfile,\n beta=self._beta)\n else:\n currentDF= self._dftype(surfaceSigma=self._surfaceSigmaProfile,\n beta=self._beta,\n corrections=corrections,\n npoints=self._npoints,\n rmax=self._rmax,\n savedir=self._savedir,\n interp_k=self._interp_k)\n newcorrections= sc.zeros((self._npoints,2))\n for jj in range(self._npoints):\n thisSurface= currentDF.surfacemass(self._rs[jj],\n use_physical=False)\n newcorrections[jj,0]= currentDF.targetSurfacemass(self._rs[jj],use_physical=False)/thisSurface\n newcorrections[jj,1]= currentDF.targetSigma2(self._rs[jj],use_physical=False)*thisSurface\\\n /currentDF.sigma2surfacemass(self._rs[jj],\n use_physical=False)\n #print(jj, newcorrections[jj,:])\n corrections*= newcorrections\n #Save\n picklethis= []\n for arr in list(corrections):\n picklethis.append([float(a) for a in arr])\n save_pickles(self._savefilename,picklethis) #We pickle a list for platform-independence)\n return corrections", "def C(v,securite):\n to_return = set()\n x,y = l[v][0],l[v][1]\n a,b = id_case(x,y) #on recupere la case ou se trouve le disque qu'on test\n voisinage = set(cases[a,b]) #on recupere la liste du voisinage (pas forcement contact)\n #4\n #012\n #345\n #678 \n if a>100:\n voisinage = add_list(voisinage,cases[a-4*rayon,b]) #3\n if b>200:\n voisinage = add_list(voisinage,cases[a-4*rayon,b-4*rayon]) #0\n voisinage = add_list(voisinage,cases[a,b-4*rayon]) #1\n if b<600:\n voisinage = add_list(voisinage,cases[a-4*rayon,b+4*rayon]) #6\n voisinage = add_list(voisinage,cases[a,b+4*rayon]) #7\n if a<1100-4*rayon:\n voisinage = add_list(voisinage,cases[a+4*rayon,b]) #5\n if b>200:\n voisinage = add_list(voisinage,cases[a+4*rayon,b-4*rayon]) #2\n voisinage = add_list(voisinage,cases[a,b-4*rayon]) #1\n if b<600:\n voisinage = add_list(voisinage,cases[a+4*rayon,b+4*rayon]) #8\n voisinage = add_list(voisinage,cases[a,b+4*rayon]) #7\n \n #On ajoute plusieurs fois le meme a un ensemble -> pas grave\n for i in voisinage:\n xb,yb = l[i][0],l[i][1]\n if 0<sqrt((x-xb)**2+(y-yb)**2)<=2*rayon+securite:\n to_return.add(i)\n return to_return", "def es_satisfecho_por(self, candidata):", "def question_5():\n return None", "def solve(self):\n print(\"Problem %s Answer: %s\" % (self.number, self.solution()))", "def calculate(self):", "def solve(self):\n for x in range(9):\n for y in range(9):\n if self.arr[x][y] == 0:\n for value in range(1,10):\n if 
self.is_possible_value(x,y,value):\n self.arr[x][y] = value\n self.solve()\n self.arr[x][y] = 0 \n return\n print(np.matrix(self.arr))", "def problem():\n\n print 'problem #27'\n\n l = 0\n m_a = 0\n m_b = 0\n for a in xrange(-1000, 1000):\n for b in xrange(-1000, 1000):\n p = len(check(a, b))\n if p > l:\n l = p\n m_a = a\n m_b = b\n\n print 'the product of coefficients is %s' % (m_a * m_b)", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def CL(self):", "def problem4() -> None:\n \n # Obtain the dataframe containing lines from the articles and the keyword\n # with which the article is associated.\n try:\n kw_df = collect_keyword_dataframe(collect_all=COLLECT_ALL) \n except FileNotFoundError:\n raise Exception(\"[ERROR] The 'articles' folder was not found. Please \"\n \"ensure that the functions for problems 1 and 2 have \"\n \"been applied first.\")\n \n keywords, corpus = separate_keywords_corpus(kw_df)\n \n # Complete the preprocessing for the corpus for comparison's sake\n w2vp = W2VPreprocessor(keywords, corpus)\n \n # Display the counts of sentences collected associated with each keyword\n keywords_countplot(kw_df)\n \n # Create displots for the lengths of sentences in the corpus\n displot_all_sentence_lengths(kw_df)\n displot_sentence_lengths_per_keyword(kw_df)\n \n # Visualise the semantic distances\n try:\n distances = read_in_distance_matrix()\n except FileNotFoundError:\n raise Exception(\"[ERROR] The distance matrix excel file was could not \"\n \"be found! 
Please ensure the methods for the \"\n \"preceding problems have been run first.\")\n visualise_distances(distances)\n \n # Visualise word frequency before and after preprocessing\n visualise_initial_most_frequent_words(kw_df)\n visualise_preprocessed_most_frequent_words(w2vp.corpus)", "def test_modified_schwefel(self):\n fun = get_problem('modified_schwefel', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 6.9448853328785844, delta=350)", "def exercise_2b():\n\n return", "def evaltr(x_solution): \n \n large = 10.0**30\n pred = np.zeros(cfg.ntrain)\n e0 = 0.0 # mean of observed values\n y=0.0\n for i in range(cfg.ntrain): # Computation of correct piece\n e0 += cfg.a_unscaled[i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[i][j1]\n y += pred[i]\n \n y = y/cfg.ntrain \n e0 = e0/cfg.ntrain\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(cfg.ntrain):\n rmse += (pred[i]-cfg.a_unscaled[i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[i][-1]) \n e1 += (cfg.a_unscaled[i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/cfg.ntrain)\n mae = mae/cfg.ntrain \n\n if cfg.ntrain > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(cfg.ntrain):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return rmse,mae,ce,r", "def task3(self):\n\n pass", "def pulp_smash():", "def get_solution(self):\r\n return self.solution" ]
[ "0.7423277", "0.7013315", "0.7013315", "0.69836867", "0.6830721", "0.6752851", "0.64645183", "0.6391518", "0.63800526", "0.63478065", "0.61165357", "0.6100315", "0.6074178", "0.60716605", "0.60713553", "0.60587716", "0.60174394", "0.5986508", "0.59821045", "0.59797627", "0.5967093", "0.5934827", "0.591364", "0.5878708", "0.5861566", "0.5854378", "0.58201444", "0.5794336", "0.5794336", "0.5794063", "0.5759302", "0.57381874", "0.57212067", "0.5698832", "0.56936604", "0.5671661", "0.5664656", "0.5630193", "0.56234264", "0.5610909", "0.5600643", "0.55946064", "0.55926317", "0.5581195", "0.55568725", "0.55346555", "0.5532026", "0.55267286", "0.55247617", "0.5516196", "0.55125594", "0.55122393", "0.5511429", "0.5511409", "0.54997367", "0.5497738", "0.5471568", "0.5471303", "0.54616904", "0.54557544", "0.54526407", "0.54433656", "0.54431623", "0.5438066", "0.5425823", "0.5425416", "0.54199696", "0.541844", "0.54177547", "0.5417652", "0.5414851", "0.5400242", "0.53993475", "0.53993475", "0.53993475", "0.5388982", "0.53813094", "0.5374739", "0.5374465", "0.5367369", "0.536511", "0.53648484", "0.53648484", "0.5355213", "0.5355146", "0.5347808", "0.5345313", "0.53436315", "0.5341634", "0.5322449", "0.5318221", "0.53170025", "0.5305443", "0.53025347", "0.53024906", "0.52993226", "0.5296881", "0.5296599", "0.52802455", "0.52792335", "0.52791667" ]
0.0
-1
Given the tile location (x,y) and zoom level z, fetch the corresponding tile from the server and save it to the location specified in fpath. Note: this saves just one tile; usually, you want to use `positive_dataset` instead.
def save_tile(x,y,z,fpath):
    UA = "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/77.0"
    tile_url = f"https://{random.choice('abc')}.tile.openstreetmap.org/{z}/{x}/{y}.png"
    # cmd = f"wget --user-agent='please download' -O {fpath} {url}"
    if os.path.exists(fpath):
        print(f"Already have tile {fpath}!")
        return 0
    if os.path.isdir(fpath):
        raise ValueError(f"requested path {fpath} exists and is a directory!")
    try:
        res = rq.get(
            url=tile_url,
            headers={'User-Agent': UA}
        )
        status = res.status_code
        if status == 200:
            with open(fpath,'wb') as of:
                of.write(res.content)
            return 0
        else:
            print(f"Error: response {status} from server:\n{res.reason}")
            return status
    except Exception as e:
        print(f"Error getting tile: {e}")
        return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_tile(map_layer, zoom, x, y):\n try:\n tile_url = map_layer.get_tile_url(zoom, x, y)\n tmp_file, headers = urllib.request.urlretrieve(tile_url)\n return (x, y), tmp_file\n except URLError as e:\n app.logger.info(\"Error downloading tile x={}, y={}, z={} for layer {}: {}\".format(\n x, y, zoom, map_layer, e.reason))\n return (x, y), pkg_resources.resource_filename(\"geos\", \"static/empty_tile.png\")", "def tile(self, z, x, y_tms):\n logger.debug(_(\"Download tile %s\") % ((z, x, y_tms),))\n # Render each keyword in URL ({s}, {x}, {y}, {z}, {size} ... )\n size = self.tilesize\n s = self.tiles_subdomains[(x + y_tms) % len(self.tiles_subdomains)];\n y_osm = (2**int(z) - 1) - int(y_tms)\n try:\n url = self.tiles_url.format(**locals())\n except KeyError, e:\n raise DownloadError(_(\"Unknown keyword %s in URL\") % e)\n logger.debug(_(\"Retrieve tile at %s\") % url)\n r = DOWNLOAD_RETRIES\n sleeptime = 1\n while r > 0:\n try:\n request = urllib2.Request(url)\n for header, value in self.headers.items():\n request.add_header(header, value)\n stream = urllib2.urlopen(request)\n assert stream.getcode() == 200\n return stream.read()\n except (AssertionError, IOError), e:\n logger.debug(_(\"Download error, retry (%s left). (%s)\") % (r, e))\n r -= 1\n time.sleep(sleeptime)\n # progressivly sleep longer to wait for this tile\n if (sleeptime <= 10) and (r % 2 == 0):\n sleeptime += 1 # increase wait\n raise DownloadError(_(\"Cannot download URL %s\") % url)", "def download_tile(self, xtile, ytile):\n location = 'http://maps.six.nsw.gov.au/arcgis/rest/services/public/NSW_Imagery/MapServer/tile/'\n destination = 'downloaded_tiles/'\n save_name = str(self.zoom_level) + '_' + str(xtile) + '_' + str(ytile)\n tile_url = location + save_name.replace('_', '/')\n tile = requests.get(tile_url, stream=True)\n with open(destination + save_name + '.png', 'wb') as out_file:\n tile.raw.decode_content = True\n shutil.copyfileobj(tile.raw, out_file)\n tilepng = png.Reader(file=tile.raw)\n # shutil.copyfileobj(tilepng, out_file)\n del tile", "def tile(self, (z, x, y)):\n output = self.cache.read((z, x, y))\n if output is None:\n # logger.info(_(\"TilesManager.tile calling sources.tile: \") )\n pass\n output = self.reader.tile(z, x, y)\n if output is None:\n return None\n # Blend layers\n if len(self._layers) > 0:\n logger.debug(_(\"Will blend %s layer(s)\") % len(self._layers))\n output = self._blend_layers(output, (z, x, y))\n # Apply filters\n for f in self._filters:\n image = f.process(self._tile_image(output))\n output = self._image_tile(image)\n # Save result to cache\n self.cache.save(output, (z, x, y))\n self.rendered += 1\n return output", "def tile(self, z, x, y):\n logger.debug(_(\"Render tile %s\") % ((z, x, y),))\n mercator = GlobalMercator(False,tilesize,[z])\n return self.render(mercator.tile_bbox((z, x, y)))", "def get_tile(tilefile,level,x,y):\n\t\n\ttf=file(tilefile,\"r\")\n\t\n\ttd=pickle.load(tf)\n\ta=td[(level,x,y)]\n\t\n\ttf.seek(a[0],1)\n\tret=tf.read(a[1])\n\t\n\ttf.close()\n\treturn ret", "def run(tile_x, tile_y, zoom, mbtiles_file):\n conn = sqlite3.connect(mbtiles_file)\n c = conn.cursor()\n c.execute(\n (\"SELECT tile_data FROM tiles WHERE \"\n \"zoom_level=? AND tile_column=? 
AND tile_row=?\"),\n (zoom, tile_x, tile_y))\n mvt_content = c.fetchone()[0]\n return mvt_content", "def save_tile_img(tif, xyz, dataset, tile_size, region, zone, save_path, display=False):\n \n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tile, mask = rt_main.tile(tif, x,y,z, tilesize=tile_size)\n if display: \n plt.imshow(np.moveaxis(tile,0,2))\n plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}.png',np.moveaxis(tile,0,2), check_contrast=False)", "def tile_to_url(tile_x, tile_y, tile_z):\n subdomain = random.choice([\"a\", \"b\", \"c\"])\n resource_url = \"https://{0}.tile.openstreetmap.org/{1}/{2}/{3}.png\"\n return resource_url.format(subdomain, tile_z, tile_x, tile_y)", "def get_tile(geojson, base_url):\n # open geojson and get tile index\n with open(geojson, 'r') as data:\n tile_geojson = json.load(data)\n features = tile_geojson[\"features\"]\n # get the tile index as x, y, z formats.\n xyz = [features[i]['properties']['tiles'] for i in range(len(features))]\n\n # create tile folder\n tiles_folder = op.splitext(geojson)[0]\n if not op.isdir(tiles_folder):\n makedirs(tiles_folder)\n\n # download and get the list of tiles\n tiles = list()\n for i in range(len(xyz)):\n x=str(xyz[i][0])\n y=str(xyz[i][1])\n z=str(xyz[i][2])\n url = base_url.replace('{x}', x).replace('{y}', y).replace('{z}', z)\n o = urlparse(url)\n _, image_format = op.splitext(o.path)\n tile_bn =\"{}-{}-{}{}\".format(z, x, y,image_format)\n r = requests.get(url)\n tile= op.join(tiles_folder, tile_bn)\n tiles.append(tile)\n with open(tile, 'wb')as w:\n w.write(r.content)\n return tiles", "def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))", "def tile(self, (z, x, y)):\n output = self.cache.read((z, x, y))\n if output is None:\n output = self.reader.tile(z, x, y)\n # Blend layers\n if len(self._layers) > 0:\n logger.debug(_(\"Will blend %s layer(s)\") % len(self._layers))\n output = self._blend_layers(output, (z, x, y))\n # Apply filters\n for f in self._filters:\n image = f.process(self._tile_image(output))\n output = self._image_tile(image)\n # Save result to cache\n self.cache.save(output, (z, x, y))\n self.rendered += 1\n return output", "def get_tile(self, tile, as_png=False, overwrite=True):\n zoom, row, col = tile\n output_path = self.config[\"output_name\"]\n zoomdir = os.path.join(output_path, str(zoom))\n rowdir = os.path.join(zoomdir, str(row))\n image_path = os.path.join(rowdir, str(col)+\".png\")\n if os.path.isfile(image_path):\n return send_file(image_path, mimetype='image/png')\n else:\n try:\n self.save_tile(tile)\n except:\n print \"tile not available\", tile\n size = self.tile_pyramid.tile_size\n empty_image = Image.new('RGBA', (size, size))\n return empty_image.tobytes()\n return send_file(image_path, mimetype='image/png')", "def to_xyz_tiles(\n self, root: str, tile_size: int, zoom_levels: list, driver=\"GTiff\", **kwargs\n ):\n mName = os.path.normpath(os.path.basename(root))\n\n def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n def tile_window(shape, px):\n \"\"\"Yield (left, 
upper, width, height).\"\"\"\n nr, nc = shape\n lu = product(range(0, nc, px), range(0, nr, px))\n\n ## create the window\n for l, u in lu:\n h = min(px, nr - u)\n w = min(px, nc - l)\n yield (l, u, w, h)\n\n vrt_fn = None\n prev = 0\n nodata = self.nodata\n obj = self._obj.copy()\n zls = {}\n for zl in zoom_levels:\n diff = zl - prev\n pxzl = tile_size * (2 ** (diff))\n\n # read data from previous zoomlevel\n if vrt_fn is not None:\n obj = xr.open_dataarray(vrt_fn, engine=\"rasterio\").squeeze(\n \"band\", drop=True\n )\n x_dim, y_dim = obj.raster.x_dim, obj.raster.y_dim\n obj = obj.chunk({x_dim: pxzl, y_dim: pxzl})\n dst_res = abs(obj.raster.res[-1]) * (2 ** (diff))\n\n if pxzl > min(obj.shape):\n logger.warning(\n f\"Tiles at zoomlevel {zl} smaller than tile_size {tile_size}\"\n )\n\n # Write the raster paths to a text file\n sd = join(root, f\"{zl}\")\n create_folder(sd)\n txt_path = join(sd, \"filelist.txt\")\n file = open(txt_path, \"w\")\n\n for l, u, w, h in tile_window(obj.shape, pxzl):\n col = int(np.ceil(l / pxzl))\n row = int(np.ceil(u / pxzl))\n ssd = join(sd, f\"{col}\")\n\n create_folder(ssd)\n\n # create temp tile\n temp = obj[u : u + h, l : l + w]\n if zl != 0:\n temp = temp.coarsen(\n {x_dim: 2**diff, y_dim: 2**diff}, boundary=\"pad\"\n ).mean()\n temp.raster.set_nodata(nodata)\n\n if driver == \"netcdf4\":\n path = join(ssd, f\"{row}.nc\")\n temp = temp.raster.gdal_compliant()\n temp.to_netcdf(path, engine=\"netcdf4\", **kwargs)\n elif driver in gis_utils.GDAL_EXT_CODE_MAP:\n ext = gis_utils.GDAL_EXT_CODE_MAP.get(driver)\n path = join(ssd, f\"{row}.{ext}\")\n temp.raster.to_raster(path, driver=driver, **kwargs)\n else:\n raise ValueError(f\"Unkown file driver {driver}\")\n\n file.write(f\"{path}\\n\")\n\n del temp\n\n file.close()\n # Create a vrt using GDAL\n vrt_fn = join(root, f\"{mName}_zl{zl}.vrt\")\n gis_utils.create_vrt(vrt_fn, file_list_path=txt_path)\n prev = zl\n zls.update({zl: float(dst_res)})\n del obj\n\n # Write a quick data catalog yaml\n yml = {\n \"crs\": self.crs.to_epsg(),\n \"data_type\": \"RasterDataset\",\n \"driver\": \"raster\",\n \"path\": f\"{mName}_zl{{zoom_level}}.vrt\",\n \"zoom_levels\": zls,\n }\n with open(join(root, f\"{mName}.yml\"), \"w\") as f:\n yaml.dump({mName: yml}, f, default_flow_style=False, sort_keys=False)", "def tile_coords_zoom_and_tileserver_to_url(\n tile_x: int, tile_y: int, tile_z: int, tile_server: dict\n) -> str:\n\n if tile_server[\"name\"] == \"bing\":\n quadKey = tile_coords_and_zoom_to_quadKey(tile_x, tile_y, tile_z)\n url = quadKey_to_Bing_URL(quadKey, tile_server[\"apiKey\"])\n elif tile_server[\"name\"] == \"sinergise\":\n url = tile_server[\"url\"].format(\n key=tile_server[\"apiKey\"],\n x=tile_x,\n y=tile_y,\n z=tile_z,\n layer=tile_server[\"wmtsLayerName\"],\n )\n elif \"maxar\" in tile_server[\"name\"]:\n # maxar uses not the standard TMS tile y coordinate,\n # but the Google tile y coordinate\n # more information here:\n # https://www.maptiler.com/google-maps-coordinates-tile-bounds-projection/\n tile_y = int(math.pow(2, tile_z) - tile_y) - 1\n url = tile_server[\"url\"].format(\n key=tile_server[\"apiKey\"],\n x=tile_x,\n y=tile_y,\n z=tile_z,\n )\n elif \"{-y}\" in tile_server[\"url\"]:\n # this uses not the standard TMS tile y coordinate,\n # but the Google tile y coordinate\n # more information here:\n # https://www.maptiler.com/google-maps-coordinates-tile-bounds-projection/\n tile_y = int(math.pow(2, tile_z) - tile_y) - 1\n url = tile_server[\"url\"].replace(\"{-y}\", \"{y}\")\n url = url.format(\n 
key=tile_server[\"apiKey\"],\n x=tile_x,\n y=tile_y,\n z=tile_z,\n )\n else:\n url = tile_server[\"url\"].format(\n key=tile_server[\"apiKey\"],\n x=tile_x,\n y=tile_y,\n z=tile_z,\n )\n\n return url", "def save_tiles(df,output_dir,namefunc = None):\n if not isinstance(df,pd.core.frame.DataFrame):\n raise TypeError(\"df must be a pandas DataFrame!\")\n if any(e not in df.columns for e in ('z','x','y')):\n raise ValueError(\"df must have columns x, y, and z\")\n if namefunc is None:\n def namefunc(x,y,z):\n return f'{z}_{x}_{y}.png'\n\n opath = os.path.abspath(os.path.expanduser(output_dir))\n Path(opath).mkdir(parents=True, exist_ok=True)\n L = df.shape[0]\n flocs = [''] * L\n for i,xyz in enumerate(zip(df['x'],df['y'],df['z'])):\n x,y,z = xyz\n print(f\"({i+1} of {L})...\")\n sleep(0.75)\n outloc = os.path.join(opath,namefunc(x,y,z))\n if save_tile(x,y,z,outloc) == 0:\n flocs[i] = outloc\n df = df.assign(file_loc = flocs)\n return df[df['file_loc'] != '']", "def render_tile(self, filename, tile_x, tile_y, zoom):\n print 'Rendering %s' % (filename)\n\n # Calculate pixel positions of bottom-left & top-right\n half_width = self.width / 2\n half_height = self.height / 2\n px0 = (tile_x * self.width, (tile_y + 1) * self.height)\n px1 = ((tile_x + 1) * self.width, tile_y * self.height)\n\n # Convert tile coords to LatLng\n ll0 = self.tile_projection.fromPixelToLL(px0, zoom);\n ll1 = self.tile_projection.fromPixelToLL(px1, zoom);\n \n # Convert LatLng to map coords\n c0 = self.map_projection.forward(mapnik2.Coord(ll0[0], ll0[1]))\n c1 = self.map_projection.forward(mapnik2.Coord(ll1[0], ll1[1]))\n\n # Create bounding box for the render\n bbox = mapnik2.Box2d(c0.x, c0.y, c1.x, c1.y)\n\n self.mapnik_map.zoom_to_box(bbox)\n self.mapnik_map.buffer_size = max([half_width, half_height]) \n\n # Render image with default Agg renderer\n image = mapnik2.Image(self.width, self.height)\n mapnik2.render(self.mapnik_map, image)\n image.save(filename, self.filetype)", "def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = 
[\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)", "def open_tile(filename):\n geoimg = gippy.GeoImage(filename, True)\n z, x, y = map(int, geoimg.basename().split('-')[0:4])\n tile = Tile.from_google(google_x=x, google_y=y, zoom=z)\n geoimg.set_srs('EPSG:3857')\n minpt = tile.bounds[0].meters\n maxpt = tile.bounds[1].meters\n affine = np.array(\n [\n minpt[0], (maxpt[0]-minpt[0])/geoimg.xsize(), 0.0,\n maxpt[1], 0.0, -(maxpt[1]-minpt[1])/geoimg.ysize()\n ])\n geoimg.set_affine(affine)\n geoimg.set_nodata(-1)\n return geoimg", "def tile(\n sceneid, tile_x, tile_y, tile_z, bands=(\"04\", \"03\", \"02\"), tilesize=256, **kwargs\n):\n scene_params = _sentinel_parse_scene_id(sceneid)\n\n if not isinstance(bands, tuple):\n bands = tuple((bands,))\n\n for band in bands:\n if band not in scene_params[\"valid_bands\"]:\n raise InvalidBandName(\"{} is not a valid Sentinel band name\".format(band))\n\n preview_file = os.path.join(\n scene_params[\"aws_bucket\"],\n scene_params[\"aws_prefix\"],\n scene_params[\"preview_file\"],\n )\n with rasterio.open(preview_file) as src:\n bounds = transform_bounds(src.crs, \"epsg:4326\", *src.bounds, densify_pts=21)\n\n if not utils.tile_exists(bounds, tile_z, tile_x, tile_y):\n raise TileOutsideBounds(\n \"Tile {}/{}/{} is outside image bounds\".format(tile_z, tile_x, tile_y)\n )\n\n mercator_tile = mercantile.Tile(x=tile_x, y=tile_y, z=tile_z)\n tile_bounds = mercantile.xy_bounds(mercator_tile)\n\n path_prefix = os.path.join(scene_params[\"aws_bucket\"], scene_params[\"aws_prefix\"])\n if scene_params[\"processingLevel\"] == \"L2A\":\n bands = [_l2_prefixed_band(b) for b in bands]\n else:\n bands = [\"B{}\".format(b) for b in bands]\n\n def _read_tile(path):\n with rasterio.open(path) as src_dst:\n return utils.tile_read(\n src_dst, bounds=tile_bounds, tilesize=tilesize, nodata=0, **kwargs\n )\n\n addresses = [\"{}/{}.jp2\".format(path_prefix, band) for band in bands]\n with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:\n data, masks = zip(*list(executor.map(_read_tile, addresses)))\n mask = np.all(masks, axis=0).astype(np.uint8) * 255\n\n return np.concatenate(data), mask", "def get_tile(self, x, y):\n\n try:\n # if tile in cache, return it from there\n return self.tile_cache[(x,y)]\n except KeyError:\n # else not in cache: get image, cache and return it\n # exceptions are normally slow,\n # but we are reading a file if we get exception, so ...\n img_name = os.path.join(self.tile_level_dir,\n 'tile_%d_%d.png' % (x, y))\n\n# 
Optimization\n# removed since we *know* tiles are there, we generated them!\n# don't need to do filesystem operation.\n# maybe put back if tiles come from internet?\n# if not os.path.exists(img_name):\n# # if tile not there, use 'missing tile' file\n# img_name = os.path.join(self.tile_dir, MissingTileFilename)\n\n img = wx.Image(img_name, wx.BITMAP_TYPE_ANY)\n pic = img.ConvertToBitmap()\n self.tile_cache[(x,y)] = pic\n return pic", "def getTileFromEmptyDirectory(self, x, y, z, **kwargs):\n basez = z\n scale = 1\n dirlist = self._tiffDirectories\n frame = self._getFrame(**kwargs)\n if frame > 0 and hasattr(self, '_frames'):\n dirlist = self._frames[frame]['dirs']\n while dirlist[z] is None:\n scale *= 2\n z += 1\n while z - basez > self._maxSkippedLevels:\n z -= self._maxSkippedLevels\n scale = int(scale / 2 ** self._maxSkippedLevels)\n tile = PIL.Image.new('RGBA', (\n min(self.sizeX, self.tileWidth * scale), min(self.sizeY, self.tileHeight * scale)))\n maxX = 2.0 ** (z + 1 - self.levels) * self.sizeX / self.tileWidth\n maxY = 2.0 ** (z + 1 - self.levels) * self.sizeY / self.tileHeight\n for newX in range(scale):\n for newY in range(scale):\n if ((newX or newY) and ((x * scale + newX) >= maxX or\n (y * scale + newY) >= maxY)):\n continue\n subtile = self.getTile(\n x * scale + newX, y * scale + newY, z,\n pilImageAllowed=True, numpyAllowed=False,\n sparseFallback=True, edge=False, frame=frame)\n if not isinstance(subtile, PIL.Image.Image):\n subtile = PIL.Image.open(io.BytesIO(subtile))\n tile.paste(subtile, (newX * self.tileWidth,\n newY * self.tileHeight))\n return tile.resize((self.tileWidth, self.tileHeight),\n getattr(PIL.Image, 'Resampling', PIL.Image).LANCZOS)", "def _download_tile_wrapper(args):\n return download_tile(*args)", "def burn_tiles(region, zone, train_tier = 1, zoom_level = 19):\n \n os.system(f'cat ../../data/raw/train_tier_{train_tier}/{region}/{zone}/{zone}.json | supermercado burn {zoom_level} | mercantile shapes | fio collect > ../../data/raw/train_tier_{train_tier}/{region}/{zone}/tiles_{region}_{zone}_{zoom_level}.geojson')\n os.system(f'echo done with {region}_{zone}_{zoom_level}')", "def getTile(self, lat, lon):\r\n if self.childFileListDownload is not None and self.childFileListDownload.is_alive():\r\n '''print \"Getting file list\"'''\r\n return 0\r\n elif not self.filelist:\r\n '''print \"Filelist download complete, loading data\"'''\r\n data = open(self.filelist_file, 'rb')\r\n self.filelist = pickle.load(data)\r\n\r\n try:\r\n continent, filename = self.filelist[(int(lat), int(lon))]\r\n except KeyError:\r\n '''print \"here??\"'''\r\n return 0\r\n\r\n if not os.path.exists(os.path.join(self.cachedir, filename)):\r\n if self.childTileDownload is None or not self.childTileDownload.is_alive():\r\n self.childTileDownload = multiprocessing.Process(target=self.downloadTile, args=(continent, filename))\r\n self.childTileDownload.start()\r\n '''print \"Getting Tile\"'''\r\n return 0\r\n elif self.childTileDownload is not None and self.childTileDownload.is_alive():\r\n '''print \"Still Getting Tile\"'''\r\n return 0\r\n # TODO: Currently we create a new tile object each time.\r\n # Caching is required for improved performance.\r\n try:\r\n return SRTMTile(os.path.join(self.cachedir, filename), int(lat), int(lon))\r\n except InvalidTileError:\r\n return 0", "def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = 
data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. \n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' )) # then navigate back up to the base directory", "def tile_at(self, zoom, position):\n x, y = self.project_pixels(position, zoom)\n return (zoom, int(x/self.tileSize), int(y/self.tileSize))", "def renderMetaTile(z, x, y, ntiles, hypsoreliefMap, landcoverreliefMap, areasMap, oceanMap, contoursMap, featuresMap):\n hypsorelief = renderLayer('hypsorelief', z, x, y, ntiles, hypsoreliefMap, 'png')\n landcoverrelief = renderLayer('landcoverrelief', z, x, y, ntiles, landcoverreliefMap, 'png')\n areas = renderLayer('areas', z, x, y, ntiles, areasMap, 'png')\n ocean = renderLayer('ocean', z, x, y, ntiles, oceanMap, 'png', True)\n contours = renderLayer('contours', z, x, y, ntiles, contoursMap, 'png', True)\n features = renderLayer('features', z, x, y, ntiles, featuresMap, 'png', True)\n base_h = getComposite((hypsorelief, areas, ocean))\n base_l = getComposite((landcoverrelief, ocean))\n composite_h = getComposite((base_h, contours, features))\n composite_l = getComposite((base_l, contours, features))\n saveTiles(z, x, y, ntiles, 'composite_h', composite_h)\n saveTiles(z, x, y, ntiles, 'composite_l', composite_l)\n if SAVE_JPEG_COMPOSITE:\n basename = 'jpeg' + str(JPEG_COMPOSITE_QUALITY)\n saveTiles(z, x, y, ntiles, basename+'_h', composite_h, 'jpg', basename)\n saveTiles(z, x, y, ntiles, basename+'_l', composite_l, 'jpg', basename)\n if SAVE_INTERMEDIATE_TILES:\n saveTiles(z, x, y, ntiles, 'base_h', base_h)\n saveTiles(z, x, y, ntiles, 'base_l', base_l)\n saveTiles(z, x, y, ntiles, 'contours', contours)\n saveTiles(z, x, y, ntiles, 'hypsorelief', hypsorelief)\n saveTiles(z, x, y, ntiles, 'landcoverrelief', landcoverrelief)\n saveTiles(z, x, y, ntiles, 'areas', areas)\n saveTiles(z, x, y, ntiles, 'ocean', ocean)\n saveTiles(z, x, y, ntiles, 'features', features)", "def getTile( self, url, pathname ):\n \n # retry counters\n tries = 1; max_tries = 3\n while tries <= max_tries:\n\n try:\n\n # setup curl object - include ssl certificates\n curl = pycurl.Curl()\n curl.setopt(pycurl.CAINFO, certifi.where())\n curl.setopt(pycurl.URL, url )\n\n # write binary data to file\n fp = open( pathname, \"wb\" )\n curl.setopt(pycurl.WRITEDATA, fp)\n curl.perform()\n\n # close object and file\n curl.close()\n fp.close()\n\n print ( '{}: {} -> {}'. 
format( self._idx, url, pathname ))\n break\n\n except Exception as e:\n\n # increment retry counter - wait for random interval\n print ( 'Download Exception {}: {} -> {}'.format( str( e ), url, pathname ) )\n tries += 1\n time.sleep ( random.randrange( 5 ) )\n\n # delete file if download failed \n if tries > max_tries:\n os.remove( pathname )\n\n return", "def tile(sceneid, tile_x, tile_y, tile_z, bands=None, tilesize=256, **kwargs):\n if not bands:\n raise InvalidBandName(\"bands is required\")\n\n if not isinstance(bands, tuple):\n bands = tuple((bands,))\n\n for band in bands:\n if band not in SENTINEL_BANDS:\n raise InvalidBandName(\"{} is not a valid Sentinel band name\".format(band))\n\n scene_params = _sentinel_parse_scene_id(sceneid)\n sentinel_address = \"{}/{}/measurement\".format(SENTINEL_BUCKET, scene_params[\"key\"])\n\n mercator_tile = mercantile.Tile(x=tile_x, y=tile_y, z=tile_z)\n tile_bounds = mercantile.xy_bounds(mercator_tile)\n\n addresses = [\n \"{}/{}-{}.tiff\".format(sentinel_address, scene_params[\"beam\"].lower(), band)\n for band in bands\n ]\n\n def _s1_tiler(src_path):\n with rasterio.open(src_path) as src_dst:\n with WarpedVRT(\n src_dst,\n src_crs=src_dst.gcps[1],\n src_transform=transform.from_gcps(src_dst.gcps[0]),\n src_nodata=0,\n ) as vrt_dst:\n if not utils.tile_exists(vrt_dst.bounds, tile_z, tile_x, tile_y):\n raise TileOutsideBounds(\n \"Tile {}/{}/{} is outside image bounds\".format(\n tile_z, tile_x, tile_y\n )\n )\n\n return utils._tile_read(vrt_dst, bounds=tile_bounds, tilesize=tilesize)\n\n with futures.ThreadPoolExecutor() as executor:\n data, masks = zip(*list(executor.map(_s1_tiler, addresses)))\n mask = numpy.all(masks, axis=0).astype(numpy.uint8) * 255\n\n return numpy.concatenate(data), mask", "def open(self, path):\n\n # abre el tilemap en formato JSON\n data = JSON.open(path)\n\n # número de tiles en 'x' y 'y'\n self.width = data['width']\n self.height = data['height']\n\n # ancho y alto de los tiles\n self.tilewidth = data['tilewidth']\n self.tileheight = data['tileheight']\n\n # calcula las dimensiones del tilemap en pixeles\n self.rect.w = self.width * self.tilewidth\n self.rect.h = self.height * self.tileheight\n\n # extrae los tilesets\n tilesets = self.tilesets\n for tileset_node in data['tilesets']:\n tileset = TiledTileset(tileset_node, path)\n tilesets.append(tileset)\n self.split_tileset(tileset)\n\n # extrae las capas (layers)\n layers = self.layers\n for layer_node in data['layers']:\n layer = TiledLayer(layer_node)\n layers.append(layer)\n self.arrange_tiles(layer)", "def load_tile_geojson(region, zone, train_tier = 1, zoom_level = 19, visualize = False):\n\n tiles_gdf = gpd.read_file(f'../../data/raw/train_tier_{train_tier}/{region}/{zone}/tiles_{region}_{zone}_{zoom_level}.geojson')\n if visualize:\n tiles_gdf.plot(figsize=(10,10), color='grey', alpha=0.5, edgecolor='red')\n \n return tiles_gdf", "def get_map_tile_resource(self, location_data: tuple, location_type: str, zoom: int, img_size: int) -> str:\n # if user did not provide a col, row, we use self.get_tile()\n if location_type == \"latlon\":\n (col, row) = self.get_tile(*location_data, zoom)\n elif location_type == \"colrow\":\n (col, row) = location_data\n\n total_url = self.map_tile_base_url + str(zoom) + '/' + str(int(col)) + '/' + str(int(\n row)) + '/' + str(img_size) + '/png8?app_id=' + self.app_id + '&app_code=' + self.app_code\n\n return total_url", "def save_tiles(s_tiles: list, lvl: str):\n dir_path = f\"../levels/level{lvl}\"\n if not 
os.path.isdir(dir_path):\n os.mkdir(dir_path)\n json_file = open(f\"{dir_path}/tiles.json\", \"w\")\n json.dump(s_tiles, json_file)", "def save_tiles(self, tiles, output_dir):\n save_path = f\"{output_dir}/tiles.npy\"\n tiles_np = np.asarray(tiles)\n np.save(save_path, tiles_np)\n print(\"done saving .npy!\")", "def get_place():\n zoom = 21\n place = request.form.get(\"place\")\n file_name = f'data/coordinates/coord_{place}_segmentation'\n tiles = load_pickle_file(file_name)\n zoom_factor = 2**21 / 2**zoom\n picHeight = 600 / zoom_factor # Resulting image height in pixels (x2 if scale parameter is set to 2)\n picWidth = 600 / zoom_factor\n\n xScale = math.pow(2, zoom) / (picWidth/256)\n yScale = math.pow(2, zoom) / (picHeight/256)\n total_tiles_sp = 0\n total_count_sp = 0\n total_sp_area = 0\n for i, tile in enumerate(tiles):\n tile['filename'] = f\"s3://solarnet-data/{tile['file_name']}\"\n if \"mask_url\" not in tile:\n tile['mask_url'] = \"\"\n else:\n tmp_url = tile['mask_url'].replace(\"img/\", \"\")\n tile['mask_url'] = f\"https://solarnet-data.s3.us-west-2.amazonaws.com/{tmp_url}\"\n tile['bounds'] = ts_imgutil.getImageBounds(tile['w'], tile['h'], xScale, yScale, tile['lat'], tile['lng'])\n if \"panels_area\" in tile:\n total_sp_area += tile[\"panels_area\"]\n if \"panels_count\" in tile:\n total_count_sp += tile[\"panels_count\"]\n if \"prediction\" in tile and int(tile[\"prediction\"]) == 1:\n total_tiles_sp += 1\n return json.dumps([tiles, total_tiles_sp, total_count_sp, round(total_sp_area, 2), len(tiles), place])", "def _testTilesZXY(server, admin, itemId, metadata, tileParams=None,\n imgHeader=utilities.JPEGHeader, token=None):\n if tileParams is None:\n tileParams = {}\n if token:\n kwargs = {'token': token}\n else:\n kwargs = {'user': admin}\n # We should get images for all valid levels, but only within the\n # expected range of tiles.\n for z in range(metadata.get('minLevel', 0), metadata['levels']):\n maxX = math.ceil(float(metadata['sizeX']) * 2 ** (\n z - metadata['levels'] + 1) / metadata['tileWidth']) - 1\n maxY = math.ceil(float(metadata['sizeY']) * 2 ** (\n z - metadata['levels'] + 1) / metadata['tileHeight']) - 1\n # Check the four corners on each level\n for (x, y) in ((0, 0), (maxX, 0), (0, maxY), (maxX, maxY)):\n resp = server.request(path='/item/%s/tiles/zxy/%d/%d/%d' % (\n itemId, z, x, y), params=tileParams, isJson=False,\n **kwargs)\n if (resp.output_status[:3] != b'200' and\n metadata.get('sparse') and z > metadata['sparse']):\n assert utilities.respStatus(resp) == 404\n continue\n assert utilities.respStatus(resp) == 200\n image = utilities.getBody(resp, text=False)\n assert image[:len(imgHeader)] == imgHeader\n # Check out of range each level\n for (x, y) in ((-1, 0), (maxX + 1, 0), (0, -1), (0, maxY + 1)):\n resp = server.request(path='/item/%s/tiles/zxy/%d/%d/%d' % (\n itemId, z, x, y), params=tileParams, **kwargs)\n if x < 0 or y < 0:\n assert utilities.respStatus(resp) == 400\n assert 'must be positive integers' in resp.json['message']\n else:\n assert utilities.respStatus(resp) == 404\n assert ('does not exist' in resp.json['message'] or\n 'outside layer' in resp.json['message'])\n # Check negative z level\n resp = server.request(path='/item/%s/tiles/zxy/-1/0/0' % itemId,\n params=tileParams, **kwargs)\n assert utilities.respStatus(resp) == 400\n assert 'must be positive integers' in resp.json['message']\n # Check non-integer z level\n resp = server.request(path='/item/%s/tiles/zxy/abc/0/0' % itemId,\n params=tileParams, **kwargs)\n assert 
utilities.respStatus(resp) == 400\n assert 'must be integers' in resp.json['message']\n # If we set the minLevel, test one lower than it\n if 'minLevel' in metadata:\n resp = server.request(path='/item/%s/tiles/zxy/%d/0/0' % (\n itemId, metadata['minLevel'] - 1), params=tileParams, **kwargs)\n assert utilities.respStatus(resp) == 404\n assert 'layer does not exist' in resp.json['message']\n # Check too large z level\n resp = server.request(path='/item/%s/tiles/zxy/%d/0/0' % (\n itemId, metadata['levels']), params=tileParams, **kwargs)\n assert utilities.respStatus(resp) == 404\n assert 'layer does not exist' in resp.json['message']", "def save_display_tile(tile, save=True, display=False):\n tile_pil_img = tile_to_pil_tile(tile)\n\n if save:\n t = Time()\n img_path = slide.get_tile_image_path(tile)\n dir = os.path.dirname(img_path)\n if not os.path.exists(dir):\n os.makedirs(dir)\n tile_pil_img.save(img_path)\n print(\"%-20s | Time: %-14s Name: %s\" % (\"Save Tile\", str(t.elapsed()), img_path))\n\n if display:\n tile_pil_img.show()", "def get_tile(url):\n hash_name = hashlib.md5(url.encode(\"utf-16\")).hexdigest()\n fname = hash_name + \".jpeg\"\n print(\"Checking tile\" + fname)\n #if image is already downloaded, return it\n if os.path.isfile(fname):\n print(\"Downloaded!\")\n try:\n # image was fully downloaded, good to return\n return Image.open(fname) \n except Exception:\n print(\"Tile is corrupt :(\")\n # file is corrupted for some reason, so try to download it\n pass\n print(\"Downloading \" + fname)\n req.urlretrieve(url, fname) \n return Image.open(fname)", "def process_tile(tile):\n global base_kwds, resampling, src\n # Get the bounds of the tile.\n ulx, uly = mercantile.xy(\n *mercantile.ul(tile.x, tile.y, tile.z))\n lrx, lry = mercantile.xy(\n *mercantile.ul(tile.x + 1, tile.y + 1, tile.z))\n\n kwds = base_kwds.copy()\n kwds['transform'] = from_bounds(ulx, lry, lrx, uly, 256, 256)\n src_nodata = kwds.pop('src_nodata', None)\n dst_nodata = kwds.pop('dst_nodata', None)\n\n with rasterio.open('/vsimem/tileimg', 'w', **kwds) as tmp:\n reproject(rasterio.band(src, src.indexes),\n rasterio.band(tmp, tmp.indexes),\n src_nodata=src_nodata,\n dst_nodata=dst_nodata,\n num_threads=1,\n resampling=resampling)\n\n data = bytearray(virtual_file_to_buffer('/vsimem/tileimg'))\n\n # Workaround for https://bugs.python.org/issue23349.\n if sys.version_info[0] == 2 and sys.version_info[2] < 10:\n # Check for backported bug fix before re-ordering\n\tif kwds['driver'] == 'PNG' and data[0:8] == png_header:\n # Properly constructed PNG, no need to re-order bytes\n pass\n\telif kwds['driver'] == 'JPEG' and data[0:4] == jpeg_header:\n # Properly constructed JPEG, no need to re-order bytes\n pass\n\telse:\n data[:] = data[-1:] + data[:-1]\n\n return tile, data", "def write_base_tile(self, tx, ty, tz, xyzzy):\n\n data_bands = range(1, self.data_bands_count+1)\n data = self.out_ds.ReadRaster(xyzzy.rx, xyzzy.ry, xyzzy.rxsize, xyzzy.rysize,\n xyzzy.wxsize, xyzzy.wysize, band_list=data_bands)\n\n image_format = self.get_base_tile_format(tx, ty, tz, xyzzy)\n\n if image_format is None:\n return\n else:\n num_bands = self.get_num_bands(image_format)\n\n if self.verbose:\n print \"\\tReadRaster Extent: \", (xyzzy.rx, xyzzy.ry, xyzzy.rxsize, xyzzy.rysize),\n print 'z =',tz,' ; x =',tx,' ; y =',ty, (xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize)\n\n dstile = self.mem_drv.Create('', self.tile_size, self.tile_size, num_bands)\n\n path = self.get_full_path(tx, ty, tz, format_extension[image_format])\n\n # Query is in 
'nearest neighbour' but can be bigger in then the tilesize\n # We scale down the query to the tilesize by supplied algorithm.\n if self.tile_size == xyzzy.querysize:\n self.fill_init_dest(dstile)\n\n # Use the ReadRaster result directly in tiles ('nearest neighbour' query)\n dstile.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, data, band_list=data_bands)\n if image_format == \"PNG\":\n dstile.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, self.alpha, band_list=[num_bands])\n\n gdal_write(path, dstile, image_format)\n\n # Note: For source drivers based on WaveLet compression (JPEG2000, ECW, MrSID)\n # the ReadRaster function returns high-quality raster (not ugly nearest neighbour)\n # TODO: Use directly 'near' for WaveLet files\n else:\n # Big ReadRaster query in memory scaled to the tilesize - all but 'near' algo\n dsquery = self.mem_drv.Create('', xyzzy.querysize, xyzzy.querysize, num_bands)\n self.fill_init_dest(dsquery)\n\n dsquery.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, data, band_list=data_bands)\n if image_format == \"PNG\":\n dsquery.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, self.alpha,band_list=[num_bands])\n\n self.resampler(path, dsquery, dstile, image_format)\n\n self.alpha = None", "def save_tile_data(tile_summary):\n\n time = Time()\n\n csv = summary_title(tile_summary) + \"\\n\" + summary_stats(tile_summary)\n\n csv += \"\\n\\n\\nTile Num,Row,Column,Tissue %,Tissue Quantity,Col Start,Row Start,Col End,Row End,Col Size,Row Size,\" + \\\n \"Color Factor,S and V Factor,Quantity Factor,Score\\n\"\n\n for t in tile_summary.tiles:\n line = \"%d,%d,%d,%4.2f,%s,%d,%d,%d,%d,%d,%d,%4.0f,%4.2f,%4.2f,%0.4f\\n\" % (\n t.tile_num, t.r, t.c, t.tissue_percentage, t.tissue_quantity().name, t.c_s, t.r_s, t.c_e, t.r_e, t.c_e - t.c_s,\n t.r_e - t.r_s,t.color_factor,\n t.s_and_v_factor, t.quantity_factor, t.score)\n csv += line\n\n data_path = slide.get_tile_data_path(tile_summary.slide_name)\n csv_file = open(data_path, \"w\")\n csv_file.write(csv)\n csv_file.close()\n\n print(\"%-20s | Time: %-14s Name: %s\" % (\"Save Tile Data\", str(time.elapsed()), data_path))", "def save(self):\n print(\"Clicked S(ave)\")\n saved_tiles = []\n for tile in self.tiles.sprites():\n # Append tiles pos to correct list if tile is occupied\n if not tile.is_available:\n tiles_attr = {\"type\": tile.tile_type, \"pos\": tile.rect.topleft}\n saved_tiles.append(tiles_attr)\n save_tiles(saved_tiles, lvl=\"02\")\n print(saved_tiles)\n # Flash white screen when level is saved\n self.surface.fill(s.WHITE)\n pygame.display.flip()\n pygame.time.wait(100)\n print(\"Saved\")", "def mbtiles(ctx, files, output, overwrite, title, description,\n layer_type, img_format, tile_size, zoom_levels, image_dump,\n num_workers, src_nodata, dst_nodata, resampling):\n output, files = resolve_inout(files=files, output=output,\n overwrite=overwrite)\n inputfile = files[0]\n\n logger = logging.getLogger('rio-mbtiles')\n\n with ctx.obj['env']:\n\n # Read metadata from the source dataset.\n with rasterio.open(inputfile) as src:\n\n validate_nodata(dst_nodata, src_nodata, src.profile.get('nodata'))\n base_kwds = {'dst_nodata': dst_nodata, 'src_nodata': src_nodata}\n\n if src_nodata is not None:\n base_kwds.update(nodata=src_nodata)\n\n if dst_nodata is not None:\n base_kwds.update(nodata=dst_nodata)\n\n # Name and description.\n title = title or os.path.basename(src.name)\n description = description or src.name\n\n # Compute the geographic bounding box of the dataset.\n (west, east), 
(south, north) = transform(\n src.crs, 'EPSG:4326', src.bounds[::2], src.bounds[1::2])\n\n # Resolve the minimum and maximum zoom levels for export.\n if zoom_levels:\n minzoom, maxzoom = map(int, zoom_levels.split('..'))\n else:\n zw = int(round(math.log(360.0 / (east - west), 2.0)))\n zh = int(round(math.log(170.1022 / (north - south), 2.0)))\n minzoom = min(zw, zh)\n maxzoom = max(zw, zh)\n\n logger.debug(\"Zoom range: %d..%d\", minzoom, maxzoom)\n\n # Parameters for creation of tile images.\n base_kwds.update({\n 'driver': img_format.upper(),\n 'dtype': 'uint8',\n 'nodata': 0,\n 'height': tile_size,\n 'width': tile_size,\n 'count': 3,\n 'crs': TILES_CRS})\n\n img_ext = 'jpg' if img_format.lower() == 'jpeg' else 'png'\n\n # Initialize the sqlite db.\n if os.path.exists(output):\n os.unlink(output)\n # workaround for bug here: https://bugs.python.org/issue27126\n sqlite3.connect(':memory:').close()\n\n conn = sqlite3.connect(output)\n cur = conn.cursor()\n cur.execute(\n \"CREATE TABLE tiles \"\n \"(zoom_level integer, tile_column integer, \"\n \"tile_row integer, tile_data blob);\")\n cur.execute(\n \"CREATE TABLE metadata (name text, value text);\")\n\n # Insert mbtiles metadata into db.\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"name\", title))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"type\", layer_type))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"version\", \"1.1\"))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"description\", description))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"format\", img_ext))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"bounds\", \"%f,%f,%f,%f\" % (west, south, east, north)))\n\n conn.commit()\n\n # Create a pool of workers to process tile tasks.\n pool = Pool(num_workers, init_worker,\n (inputfile, base_kwds, resampling), 100)\n\n # Constrain bounds.\n EPS = 1.0e-10\n west = max(-180 + EPS, west)\n south = max(-85.051129, south)\n east = min(180 - EPS, east)\n north = min(85.051129, north)\n\n # Initialize iterator over output tiles.\n tiles = mercantile.tiles(\n west, south, east, north, range(minzoom, maxzoom + 1))\n\n for tile, contents in pool.imap_unordered(process_tile, tiles):\n\n if contents is None:\n logger.info(\"Tile %r is empty and will be skipped\", tile)\n continue\n\n # MBTiles has a different origin than Mercantile/tilebelt.\n tiley = int(math.pow(2, tile.z)) - tile.y - 1\n\n # Optional image dump.\n if image_dump:\n img_name = '%d-%d-%d.%s' % (\n tile.x, tiley, tile.z, img_ext)\n img_path = os.path.join(image_dump, img_name)\n with open(img_path, 'wb') as img:\n img.write(contents)\n\n # Insert tile into db.\n cur.execute(\n \"INSERT INTO tiles \"\n \"(zoom_level, tile_column, tile_row, tile_data) \"\n \"VALUES (?, ?, ?, ?);\",\n (tile.z, tile.x, tiley, buffer(contents)))\n\n conn.commit()\n\n conn.close()\n # Done!", "def save_tile_mask(label_poly_series, tile_poly, xyz, tile_size, dataset, region, zone, save_path, channels = 3, display=False):\n \n \n\n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tfm = from_bounds(*tile_poly.bounds, tile_size, tile_size) \n \n cropped_polys = [poly for poly in label_poly_series if poly.intersects(tile_poly)]\n cropped_polys_gdf = gpd.GeoDataFrame(geometry=cropped_polys, crs={'init': 'epsg:4326'})\n \n fbc_mask = burn_mask(cropped_polys_gdf, tfm, tile_size, channels)\n # fbc_mask = 
sol.vector.mask.df_to_px_mask(df=cropped_polys_gdf,\n # channels=['footprint', 'boundary', 'contact'],\n # affine_obj=tfm, shape=(tile_size,tile_size),\n # boundary_width=5, boundary_type='inner', contact_spacing=5, meters=True)\n \n if display: \n plt.imshow(fbc_mask); plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}_mask.png',fbc_mask, check_contrast=False)", "def tilefilename(self, x, y, z):\n\n tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]\n return os.path.join(\"TileGroup%.0f\" % math.floor( tileIndex / 256 ),\n \"%s-%s-%s.%s\" % ( z, x, y, self.tileformat))", "def write_overview_tile(self, tx, ty, tz,tms_osm):\n\n image_format = self.get_overview_tile_format(tx, ty, tz)\n\n if image_format is None:\n return\n else:\n num_bands = self.get_num_bands(image_format)\n\n dsquery = self.mem_drv.Create('', 2*self.tile_size, 2*self.tile_size, num_bands)\n self.fill_init_dest(dsquery)\n # tms: z=19: 281626\n # -z=18-140813 176168*2=352336; 176168*2+1=352337\n # -- 352336,352337\n y_from=2*ty\n y_to=2*ty + 1\n ty_tms=ty;\n s_y_type=\"tms\"\n if tms_osm:\n # osm: z=19: 281626\n # -z=18-140813 85975*2+1=171951; 85975*2=171950\n # -- 171951,171950 [in range: last/end not used]\n y_from=2*ty + 1\n y_to=2*ty\n ty_tms=(2**tz-1) - ty\n s_y_type=\"osm\"\n s_tile_id=\"{0}-{1}-{2}.{3}\".format(str(tz), str(tx),str(ty),s_y_type)\n if self.verbose:\n # Build from zoom 19 tiles: (281626, 171951) (281627, 171951) (281626, 171950) (281627, 171950)\n print \"\\tBuild [\",s_tile_id,\"] from [\",self.output_dir,\"] zoom\", tz+1,\" tiles [\",s_y_type,\"]: \", (2*tx, y_from), (2*tx+1, y_from),(2*tx, y_to), (2*tx+1, y_to)\n\n for cx, cy, child_image_format in self.iter_children(tx, ty, tz):\n if (ty_tms==0 and cy==1) or (ty_tms!=0 and (cy % (y_from)) != 0):\n tileposy = 0\n else:\n tileposy = self.tile_size\n if tx:\n tileposx = cx % (2*tx) * self.tile_size\n elif tx==0 and cx==1:\n tileposx = self.tile_size\n else:\n tileposx = 0\n\n path = self.get_full_path(cx, cy, tz+1, format_extension[child_image_format])\n\n dsquerytile = gdal.Open(path, gdal.GA_ReadOnly)\n\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n dsquerytile.ReadRaster(0, 0, self.tile_size, self.tile_size),\n band_list=range(1, dsquerytile.RasterCount+1))\n\n if image_format == \"PNG\" and dsquerytile.RasterCount != num_bands:\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n self.get_alpha_filler(), band_list=[num_bands])\n\n dstile = self.mem_drv.Create('', self.tile_size, self.tile_size, num_bands)\n path = self.get_full_path(tx, ty, tz, format_extension[image_format])\n self.resampler(path, dsquery, dstile, image_format)", "def getPixel(self, **kwargs):\n # TODO: netCDF - currently this will read the values from the\n # default subdatatset; we may want it to read values from all\n # subdatasets and the main raster bands (if they exist), and label the\n # bands better\n pixel = super().getPixel(includeTileRecord=True, **kwargs)\n tile = pixel.pop('tile', None)\n if tile:\n # Coordinates in the max level tile\n x, y = tile['gx'], tile['gy']\n if self.projection:\n # convert to a scale of [-0.5, 0.5]\n x = 0.5 + x / 2 ** (self.levels - 1) / self.tileWidth\n y = 0.5 - y / 2 ** (self.levels - 1) / self.tileHeight\n # convert to projection coordinates\n x = self.projectionOrigin[0] + x * self.unitsAcrossLevel0\n y = self.projectionOrigin[1] + y * self.unitsAcrossLevel0\n # convert to native pixel coordinates\n x, y = self.toNativePixelCoordinates(x, 
y)\n if 0 <= int(x) < self.sizeX and 0 <= int(y) < self.sizeY:\n with self._getDatasetLock:\n for i in range(self.dataset.RasterCount):\n band = self.dataset.GetRasterBand(i + 1)\n try:\n value = band.ReadRaster(int(x), int(y), 1, 1, buf_type=gdal.GDT_Float32)\n if value:\n pixel.setdefault('bands', {})[i + 1] = struct.unpack('f', value)[0]\n except RuntimeError:\n pass\n return pixel", "def http_raster():\n return \"https://ungarj.github.io/mapchete_testdata/tiled_data/raster/cleantopo/1/0/0.tif\"", "async def get_tile_cache_preview(\n *, request: Request, dataset: str, version: str, implementation\n):\n\n tile_caches = get_dataset_tile_caches(dataset, version, implementation)\n sources = {\n \"carto-dark\": {\n \"type\": \"raster\",\n \"tiles\": [\n \"https://a.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n \"https://b.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n \"https://c.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n \"https://d.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n ],\n },\n }\n\n layers = [\n {\n \"id\": \"carto-dark-layer\",\n \"type\": \"raster\",\n \"source\": \"carto-dark\",\n \"minzoom\": 0,\n \"maxzoom\": 22,\n },\n ]\n for tile in tile_caches:\n if tile[\"asset_type\"] == \"Static vector tile cache\":\n try:\n style_specs = await get_static_vector_tile_cache_style_spec(tile)\n except ClientError:\n style_specs = get_default_style_spec(tile)\n else:\n style_specs = get_default_style_spec(tile)\n\n layers = [*layers, *style_specs[\"layers\"]]\n sources[dataset] = style_specs[\"sources\"][dataset]\n\n if len(layers) == 1:\n raise HTTPException(\n status_code=404, detail=\"No tile caches available for this dataset.\"\n )\n\n return templates.TemplateResponse(\n \"tile_preview.html\",\n context={\"sources\": sources, \"layers\": layers, \"request\": request},\n )", "def get_tile_tuple(point, zoom):\n z = int(zoom)\n dlon = 256\n dlat = 256\n\n lon0, lat0 = point.crs.project(*point.vertex[:2], inverse=True)\n c = 128/math.pi * 2**z\n x0 = c * (lon0*math.pi/180+math.pi)\n y0 = c * (math.pi-math.log(math.tan(math.pi/4+lat0*math.pi/360)))\n\n x = int(x0 // dlon)\n y = int(y0 // dlat)\n return z, x, y", "def load_tile(path, tile_size):\n img = pyglet.resource.image(path)\n img.width = tile_size\n img.height = tile_size\n return img", "def get_tiles_from_server(self, variants, server):\n def request_and_crop(zoom, x, y):\n _x = int(math.floor(x))\n _y = int(math.floor(y))\n\n x_mod = 0.5 - (x - _x) #How does this desviates from 0.5\n y_mod = 0.5 - (y - _y) \n\n if x_mod > 0:\n x_start = _x - 1 #1 tile before\n start_xpixel = int(math.floor((1-x_mod)*256))\n else:\n x_start = _x\n start_xpixel = int(math.floor(-1*x_mod*256))\n if y_mod > 0:\n y_start = _y - 1 #1 tile before\n start_ypixel = int(math.floor((1-y_mod)*256))\n else:\n y_start = _y\n start_ypixel = int(math.floor(-1*y_mod*256))\n\n tile = np.zeros((256*2, 256*2, 3), dtype= 'uint8')\n for x in range(2):\n for y in range(2):\n url = 'http://localhost:8080/{}/{}/{}.png'.format(zoom, x_start + x, y_start + y)\n resp = urlopen(url)\n image = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n tile[256*y:256*(y+1), 256*x:256*(x+1),...] 
= image\n tile = tile[start_ypixel:start_ypixel+256,start_xpixel:start_xpixel+256]\n return tile\n tiles = []\n for _ in range(variants):\n zoom = random.randint(19,21)\n x, y = self.getXY(zoom) \n tile = request_and_crop(zoom, x, y)\n tile = cv2.resize(tile, (self.width, self.height))\n tiles.append(tile)\n tiles = np.stack(tiles)\n return tiles", "def getImageTile(self, **kwargs):\n if self.hasImage():\n imgtileurl = self.getImage().absolute_url(1) + '_tile'\n portal_obj = getToolByName(self, 'portal_url').getPortalObject()\n portal_url = portal_obj.absolute_url(1)\n imgtileurl = imgtileurl.replace(portal_url, '')\n return imgtileurl\n return ''", "def tilefilename(self, x, y, z):\n\n tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]\n return os.path.join(\"TileGroup%.0f\" % math.floor(tileIndex / 256),\n \"%s-%s-%s.%s\" % (z, x, y, self.tileformat))", "def _get_tile_path(self, **kwargs) -> Union[CloudPath, Path]:\n band = kwargs.pop(\"band\")\n if band == PAN:\n tile_path = self._get_path(\"PAN\", \"tiff\")\n else:\n tile_path = self._get_path(\"MUX\", \"tiff\")\n\n return tile_path", "def tile(\n sceneid: str,\n tile_x: int,\n tile_y: int,\n tile_z: int,\n bands: Union[Sequence[str], str] = (\"04\", \"03\", \"02\"),\n tilesize: int = 256,\n **kwargs: Dict,\n) -> Tuple[numpy.ndarray, numpy.ndarray]:\n if isinstance(bands, str):\n bands = (bands,)\n\n scene_params = sentinel2_parser(sceneid)\n for band in bands:\n if band not in scene_params[\"valid_bands\"]:\n raise InvalidBandName(\"{} is not a valid Sentinel band name\".format(band))\n\n sentinel_prefix = \"{scheme}://{bucket}/{prefix}\".format(**scene_params)\n\n preview_file = os.path.join(sentinel_prefix, scene_params[\"preview_file\"])\n with rasterio.open(preview_file) as src:\n bounds = transform_bounds(\n src.crs, constants.WGS84_CRS, *src.bounds, densify_pts=21\n )\n if not tile_exists(bounds, tile_z, tile_x, tile_y):\n raise TileOutsideBounds(\n \"Tile {}/{}/{} is outside image bounds\".format(tile_z, tile_x, tile_y)\n )\n\n if scene_params[\"processingLevel\"] == \"L2A\":\n bands = [_l2_prefixed_band(b) for b in bands]\n else:\n bands = [\"B{}\".format(b) for b in bands]\n\n addresses = [f\"{sentinel_prefix}/{band}.jp2\" for band in bands]\n return reader.multi_tile(\n addresses, tile_x, tile_y, tile_z, tilesize=tilesize, nodata=0\n )", "def find_tiles(x_index = None, y_index = None):\n db_cursor2 = self.db_connection.cursor()\n\n sql = \"\"\"-- Check for any existing tiles\nselect\n tile_id,\n x_index,\n y_index,\n tile_type_id,\n tile_pathname,\n dataset_id,\n tile_class_id,\n tile_size\nfrom tile_footprint\ninner join tile using(x_index, y_index, tile_type_id)\nwhere (%(x_index)s is null or x_index = %(x_index)s)\n and (%(y_index)s is null or y_index = %(y_index)s)\n and tile_type_id = %(tile_type_id)s\n and dataset_id = %(fc_dataset_id)s\n\n and ctime is not null -- TODO: Remove this after reload\n;\n\"\"\"\n params = {'x_index': x_index,\n 'y_index': y_index,\n 'tile_type_id': tile_type_info['tile_type_id'],\n 'fc_dataset_id': dataset_info['fc_dataset_id']}\n \n log_multiline(logger.debug, db_cursor2.mogrify(sql, params), 'SQL', '\\t')\n db_cursor2.execute(sql, params)\n tile_info = {}\n for record in db_cursor2:\n tile_info_dict = {\n 'x_index': record[1],\n 'y_index': record[2],\n 'tile_type_id': record[3],\n 'tile_pathname': record[4],\n 'dataset_id': record[5],\n 'tile_class_id': record[6],\n 'tile_size': record[7]\n }\n tile_info[record[0]] = tile_info_dict # Keyed by tile_id\n \n 
log_multiline(logger.debug, tile_info, 'tile_info', '\\t')\n return tile_info", "def test_lat_lon_to_tile(self):\n\n lat = 48\n lon = 37.7\n z = 10\n\n tile_calculated = geomath.lat_lon_to_tile(lat,lon,z)\n tile_known = (619,355,10)\n\n # make sure the tiles are the same\n self.assertEqual(tile_calculated,tile_known)", "def query_image_tile(self, coord):", "def renderLayer(name, z, x, y, ntiles, map, suffix = 'png', useCairo = False):\n console.debugMessage(' Rendering layer: ' + name)\n env = getMercTileEnv(z, x, y, ntiles, True)\n tilesize = getTileSize(ntiles, True)\n map.zoom_to_box(env)\n if useCairo and USE_CAIRO:\n assert mapnik.has_cairo()\n surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, tilesize, tilesize)\n mapnik.render(map, surface)\n image = mapnik.Image.from_cairo(surface)\n else: \n image = mapnik.Image(tilesize, tilesize)\n mapnik.render(map, image)\n return image", "def find_tile(loc, dir):\n #returns the integer tile number\n \n # should be looking in the directory with supergrid data (probably \"fix\" directory)\n filename_pattern = '*grid.tile*.nc'\n \n #find all supergrid files in the directory\n grid_fnames = []\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n grid_fnames.append(f_name)\n if not grid_fnames:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n #non-polar tiles can use traditional 2D point-in-polygon methods; if a point is not in a non-polar tile,\n #it is in one of the polar tiles, and the tile can be distinguished by the sign of latitude of the point\n polar_tile_filenames = []\n found_tile = False\n for f_name in grid_fnames:\n if not found_tile:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n longitude = np.array(nc_file['x']).swapaxes(0,1)\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n \n #get lon/lat pairs for all edges of the tiles\n \n edge_1_lon = longitude[0,:]\n edge_1_lat = latitude[0,:]\n edge_1 = list(zip(edge_1_lon, edge_1_lat))\n \n edge_2_lon = longitude[:,-1]\n edge_2_lat = latitude[:,-1]\n edge_2 = list(zip(edge_2_lon, edge_2_lat))\n \n edge_3_lon = longitude[-1,:]\n edge_3_lat = latitude[-1,:]\n edge_3 = list(zip(edge_3_lon, edge_3_lat))\n edge_3.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n edge_4_lon = longitude[:,0]\n edge_4_lat = latitude[:,0]\n edge_4 = list(zip(edge_4_lon, edge_4_lat))\n edge_4.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n polygon_points = edge_1 + edge_2 + edge_3 + edge_4\n \n tile_polygon = Polygon(polygon_points)\n tile_polygon = tile_polygon.simplify(0)\n \n if tile_polygon.is_valid: #this will be True unless the tile is a polar tile, which will not form a regular polygon in Cartesian space using lon/lat data\n temp_loc = copy.deepcopy(loc)\n if adj_long:\n if loc[0] < 180:\n temp_loc[0] += 360\n loc_point = Point(temp_loc)\n if tile_polygon.contains(loc_point):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n else:\n polar_tile_filenames.append(f_name)\n \n #if the tile hasn't been found by this point, it must be contained within a polar tile\n for f_name in 
polar_tile_filenames:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n #if the sign of the mean latitude of the tile is the same as that of the point, the tile has been found\n if np.sign(np.mean(latitude)) == np.sign(loc[1]):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n return -1", "def use_level(self, n):\n\n # try to get cache for this level, no cache means no level\n try:\n self.tile_cache = self.cache[n]\n except KeyError:\n return None\n\n # get tile info\n info = self.get_info(n)\n if info is None:\n return None\n\n (self.num_tiles_x, self.num_tiles_y, self.ppd_x, self.ppd_y) = info\n\n # cache partial path to level dir\n self.tile_level_dir = os.path.join(self.tile_dir, '%02d' % n)\n\n return (self.tile_size_x*self.num_tiles_x,\n self.tile_size_y*self.num_tiles_y,\n self.ppd_x, self.ppd_y)", "def lonToTile(lon, zoom):\n n = 2.0 ** zoom\n return ((lon + 180.0) / 360.0) * n", "def webtile_from_geotiff(self, geotiff_path, overwrite=True):\n\n # We will make a set of tiles for each band/stat in the geotiff so we\n # need to get the bands.\n stats = self.config.get_stat_names()\n palettes = self.palettes\n nodata_vals = self.config.get_nodata_vals()\n\n try:\n raster = Raster.from_file(geotiff_path)\n image_data = raster.data\n tile = self.tiles.tile_from_path(geotiff_path)\n\n message = f'Creating web tile {tile} from geotiff {geotiff_path}.'\n #id = self.__start_tracking(\n # 'webtiles_from_geotiffs', message=message)\n\n for i in range(len(stats)):\n stat = stats[i]\n # Get the path for the output web tile\n output_path = self.tiles.path_from_tile(\n tile, base_dir='web_tiles', style=stat)\n if os.path.isfile(output_path) and not overwrite:\n logger.info(f'Skip creating web tile for tile {tile} and '\n f'stat {stat}. 
Web tile already exists at '\n f'{output_path}')\n continue\n palette = palettes[i]\n nodata_val = nodata_vals[i]\n band_image_data = image_data[i]\n min_val = self.config.get_min(\n stat=stat, z=tile.z, sub_general=True)\n max_val = self.config.get_max(\n stat=stat, z=tile.z, sub_general=True)\n img = WebImage(\n image_data=band_image_data,\n palette=palette,\n min_val=min_val,\n max_val=max_val,\n nodata_val=nodata_val)\n img.save(output_path)\n\n message = f'Done creating web tile {tile}'\n self.__end_tracking(id, raster=raster, tile=tile, message=message)\n\n return tile\n\n except Exception as e:\n message = f'Error creating web tile for tile {tile} from '\n f'geotiff {geotiff_path}.'\n self.__end_tracking(id, tile=tile, error=e, message=message)\n return None", "def _save_mst_tile(tile, i, preread_ifgs):\n mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs, params)\n # locally save the mst_mat\n mst_file_process_n = join(params[cf.TMPDIR], 'mst_mat_{}.npy'.format(i))\n np.save(file=mst_file_process_n, arr=mst_tile)", "def getTile(self, x, y):#TODO Make this a dictionary, and make a pprint function\n o = {}\n for layer in self.layers.keys():\n o[layer] = str(self.layers[layer][x, y])\n return o", "def getTile(self, x, y):\n if self._tiffInfo.get('orientation') not in {\n libtiff_ctypes.ORIENTATION_TOPLEFT,\n None}:\n return self._getTileRotated(x, y)\n # This raises an InvalidOperationTiffError if the tile doesn't exist\n tileNum = self._toTileNum(x, y)\n\n if (not self._tiffInfo.get('istiled') or\n self._tiffInfo.get('compression') not in (\n libtiff_ctypes.COMPRESSION_JPEG, 33003, 33005, 34712) or\n self._tiffInfo.get('bitspersample') != 8 or\n self._tiffInfo.get('sampleformat') not in {\n None, libtiff_ctypes.SAMPLEFORMAT_UINT}):\n return self._getUncompressedTile(tileNum)\n\n imageBuffer = io.BytesIO()\n\n if (self._tiffInfo.get('compression') == libtiff_ctypes.COMPRESSION_JPEG and\n not getattr(self, '_completeJpeg', False)):\n # Write JPEG Start Of Image marker\n imageBuffer.write(b'\\xff\\xd8')\n imageBuffer.write(self._getJpegTables())\n imageBuffer.write(self._getJpegFrame(tileNum))\n # Write JPEG End Of Image marker\n imageBuffer.write(b'\\xff\\xd9')\n return imageBuffer.getvalue()\n # Get the whole frame, which is in a JPEG or JPEG 2000 format, and\n # convert it to a PIL image\n imageBuffer.write(self._getJpegFrame(tileNum, True))\n image = PIL.Image.open(imageBuffer)\n # Converting the image mode ensures that it gets loaded once and is in\n # a form we expect. 
If this isn't done, then PIL can load the image\n # multiple times, which sometimes throws an exception in PIL's JPEG\n # 2000 module.\n if image.mode != 'L':\n image = image.convert('RGB')\n else:\n image.load()\n return image", "def export_image(self, bbox, zoomlevel, imagepath):\n assert has_pil, _(\"Cannot export image without python PIL\")\n grid,tile_bounds = self.grid_tiles(bbox, zoomlevel)\n width = len(grid[0])\n height = len(grid)\n widthpix = width * self.tile_size\n heightpix = height * self.tile_size\n result = Image.new(\"RGBA\", (widthpix, heightpix))\n offset = (0, 0)\n for i, row in enumerate(grid):\n for j, (x, y) in enumerate(row):\n offset = (j * self.tile_size, i * self.tile_size)\n img = self._tile_image(self.tile((zoomlevel, x, y)))\n result.paste(img, offset)\n if imagepath.endswith(\".tif\") or imagepath.endswith(\".tiff\"):\n # http://effbot.org/imagingbook/pil-index.htm#appendixes\n # Packbits, LZW, or JPEG\n # In the current version, PIL always writes uncompressed TIFF files.\n image_gdal=\"gdal_input.tif\"\n result.save(image_gdal, format=\"TIFF\", compression=\"JPEG\")\n self.geotif_image(tile_bounds,(widthpix,heightpix),imagepath,image_gdal)\n else:\n if imagepath.endswith(\".jpg\") or imagepath.endswith(\".jpeg\"):\n # IOError: encoder error -2 when writing image file\n result.save(imagepath, format=\"JPEG\", quality=int(self.jpg_quality), optimize=True, progressive=False)\n elif imagepath.endswith(\".png\") :\n result.save(imagepath, format=\"PNG\",optimize=True)\n else:\n result.save(imagepath)\n logger.info(_(\"-I-> export_image: Save resulting image to '%s' - bounds[%s]\") % (imagepath,tile_bounds))", "def batch_save_tile_mask(tiles_gdf, label_poly_series, tile_size, region, zone, save_path, channels=3, display=False):\n \n import warnings; warnings.simplefilter('ignore')\n\n for idx, tile in tqdm(tiles_gdf.iterrows()):\n dataset = tile['dataset']\n tile_poly = get_specific_tile(idx, tiles_gdf)\n save_tile_mask(label_poly_series, tile_poly, tile['xyz'], tile_size, dataset,\n region, zone, save_path, channels, display)", "def geometry_from_tile_coords(TileX, TileY, zoom):\n\n # Calculate lat, lon of upper left corner of tile\n PixelX = TileX * 256\n PixelY = TileY * 256\n lon_left, lat_top = pixel_coords_zoom_to_lat_lon(PixelX, PixelY, zoom)\n\n # Calculate lat, lon of lower right corner of tile\n PixelX = (TileX + 1) * 256\n PixelY = (TileY + 1) * 256\n lon_right, lat_bottom = pixel_coords_zoom_to_lat_lon(PixelX, PixelY, zoom)\n\n # Create Geometry\n ring = ogr.Geometry(ogr.wkbLinearRing)\n ring.AddPoint(lon_left, lat_top)\n ring.AddPoint(lon_right, lat_top)\n ring.AddPoint(lon_right, lat_bottom)\n ring.AddPoint(lon_left, lat_bottom)\n ring.AddPoint(lon_left, lat_top)\n poly = ogr.Geometry(ogr.wkbPolygon)\n poly.AddGeometry(ring)\n\n wkt_geom = poly.ExportToWkt()\n return wkt_geom", "def tile(self, x: int, y: int):\n return self.awmap.tile(x, y)", "def gettile(self, base_url=None, layer=None, style=None, format=None, tilematrixset=None, tilematrix=None, row=None, column=None):\n data = self.buildTileRequest(layer, style, format, tilematrixset, tilematrix, row, column)\n\n if base_url is None:\n base_url = self.getOperationByName('GetTile').methods['Get']['url']\n u = openURL(base_url, data, username = self.username, password = self.password)\n\n # check for service exceptions, and return\n if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml':\n se_xml = u.read()\n se_tree = etree.fromstring(se_xml)\n err_message = 
unicode(se_tree.find('ServiceException').text).strip()\n raise ServiceException(err_message, se_xml)\n return u", "def get_image(self, p, t, c, z):\n assert p in self.position_map.keys(), \\\n \"Position index {} doesn't exist in map\".format(p)\n pos = self.get_zarr(p)\n return pos[t, c, z]", "def get_tile_at_position(level, position):\n size = level_size(level)\n index = position_to_index(position, size)\n return level[index]", "def get_tile(self, row, col):\n tile_index = (row - 1) * self.num_col_tiles + (col - 1)\n tile = self.tiles[tile_index]\n return tile", "def submit_download_mock(_self, _fetch_and_save, filename, dest_folder):\n # If filename == foo/bar/x_y_z_attr.dat, content == \"x_y_z_attr\"\n content = os.path.splitext(os.path.basename(filename))[0]\n if content.split(\"_\")[-1] == \"full\":\n content = {\"molecule\": content}\n qml.data.Dataset._write_file(content, os.path.join(dest_folder, filename))", "def get_cmx_map(campus, building, floor, file):\n\n url = CMX_URL + '/api/config/v1/maps/image/' + campus + '/' + building + '/' + floor\n\n header = {'content-type': 'application/json'}\n response = requests.get(url, headers=header, auth=CMX_AUTH, verify=False)\n print('\\nThe floor map request url is: ', url)\n print('Request status code is: ', response.status_code)\n\n if response.status_code == 200: # validate if the request was successful\n print('Assignment 2 completed')\n else:\n print('Assignment 2 not completed, please try again')\n\n # open a file to save the image to\n\n image_file = open(file, 'wb')\n image_file.write(response.content) # save the content of the request as it comes back as an image and not JSON\n image_file.close()", "def batch_save_tile_img(tiles_gdf, tif, tile_size, region, zone, save_path, display=False):\n for idx, tile in tqdm(tiles_gdf.iterrows()):\n dataset = tile['dataset']\n save_tile_img(tif, tile['xyz'], dataset, tile_size, region, zone, save_path, display=False)", "def save_tile(self, tile, overwrite=True):\n process_name = os.path.splitext(\n os.path.basename(self.config[\"process_file\"])\n )[0]\n new_process = imp.load_source(\n process_name + \"Process\",\n self.config[\"process_file\"]\n )\n self.config[\"tile\"] = tile\n self.config[\"tile_pyramid\"] = self.tile_pyramid\n mapchete_process = new_process.Process(self.config)\n try:\n mapchete_process.execute()\n except Exception as e:\n return tile, traceback.print_exc(), e\n finally:\n mapchete_process = None\n return tile, \"ok\", None", "def find_coverage(self, zoom):\n # Find a group of adjacent available tiles at this zoom level\n rows = self.mbtiles_cursor.execute('''SELECT tile_column, tile_row FROM tiles WHERE zoom_level=? 
ORDER BY tile_column, tile_row;''', (zoom,))\n tile = rows.fetchone()\n xmin, ymin = tile\n tile_prev = tile\n while tile and tile[0] - tile_prev[0] <= 1:\n # adjacent, go on\n tile_prev = tile\n tile = rows.fetchone()\n xmax, ymax = tile_prev\n # Transform (xmin, ymin) (xmax, ymax) to pixels\n tile_size = self.tilesize\n bottomleft = (xmin * tile_size, (ymax + 1) * tile_size)\n topright = ((xmax + 1) * tile_size, ymin * tile_size)\n # Convert center to (lon, lat)\n mercator = GlobalMercator(self.tms_osm,tile_size,[zoom])\n return mercator.unproject_pixels(bottomleft, zoom) + mercator.unproject_pixels(topright, zoom)", "def cache(self):\n\t\tprint self.url\n\t\tif self.url and not self.streetimage:\n\t\t\tresult = urllib.urlretrieve(self.url)\n\t\t\tfname = os.path.basename(self.url).split('&')[-1]+\".jpg\"\n\t\t\tprint 'fname = ', fname, 'result = ', result\n\t\t\tself.streetimage.save(fname, File(open(result[0])))\n\t\t\tself.save()", "def get_objects():\n\n # check whether this session is over its limit\n if 'tiles' not in session:\n session['tiles'] = 0\n\n print(\"tiles queried in session:\", session['tiles'])\n if session['tiles'] > MAX_TILES_SESSION:\n return \"-1\"\n\n # start time, get params\n type = request.form.get(\"type\")\n bounds = request.form.get(\"bounds\")\n height = float(request.form.get(\"height\"))\n width = float(request.form.get(\"width\"))\n zoom = int(request.form.get(\"zoom\"))\n # zoom = 16\n print(\" bounds:\", bounds)\n print(\" width:\", width)\n print(\" height:\", height)\n print(\" zoom:\", zoom)\n\n # cropping\n crop_tiles = False\n\n # create a map provider object\n map_object = GoogleMap(google_api_key)\n\n # divide map into tiles\n tiles, nx, ny, meters, h, w = map_object.make_tiles(bounds, crop_tiles=crop_tiles)\n tiles_overlap, nx_overlap, ny_overlap, meters_overlap, h_overlap, w_overlap = map_object.make_tiles(bounds, overlap_percent=2, crop_tiles=crop_tiles)\n print(f\" {len(tiles)} tiles, {nx} x {ny}, {meters} x {meters} m\")\n # print(\" Tile centers:\")\n # for c in tiles:\n # print(\" \",c)\n\n tiles = [t for t in tiles if ts_maps.check_tile_against_bounds(t, bounds)]\n for i, tile in enumerate(tiles):\n tile['id'] = i\n\n print(\" tiles left after viewport and polygon filter:\", len(tiles))\n\n if \"tmpdirname\" in session:\n rmtree(session['tmpdirname'], ignore_errors=True, onerror=None)\n print(\"cleaned up tmp dir\", session['tmpdirname'])\n del session['tmpdirname']\n\n # make a new tempdir name and attach to session\n tmpdir = tempfile.TemporaryDirectory()\n tmpdirname = tmpdir.name\n tmpfilename = tmpdirname[tmpdirname.rindex(\"/\")+1:]\n print(\"creating tmp dir\", tmpdirname)\n session['tmpdirname'] = tmpdirname\n tmpdir.cleanup()\n os.mkdir(tmpdirname)\n print(\"created tmp dir\", tmpdirname)\n\n # retrieve tiles and metadata if available\n meta = map_object.get_sat_maps(tiles, loop, tmpdirname, tmpfilename)\n session['metadata'] = meta\n print(\" asynchronously retrieved\", len(tiles), \"files\")\n\n # we create tiles at zoom=21, so factor the size by the current zoom\n zoom_factor = 2**21 / 2**zoom\n picHeight = 600 / zoom_factor # Resulting image height in pixels (x2 if scale parameter is set to 2)\n picWidth = 600/zoom_factor\n\n xScale = math.pow(2, zoom) / (picWidth/256)\n yScale = math.pow(2, zoom) / (picHeight/256)\n\n for i, tile in enumerate(tiles):\n tile['filename'] = tmpdirname+\"/\"+tmpfilename+str(i)+\".jpg\"\n tile['bounds'] = ts_imgutil.getImageBounds(tile['w'], tile['h'], xScale, yScale, tile['lat'], 
tile['lng'])\n\n if type == 'tiles':\n return json.dumps(tiles)\n elif type == 'classification':\n model_classification = Classification()\n tiles = model_classification.predict(tiles)\n return json.dumps(tiles)\n elif type == 'segmentation':\n model_classification = Classification()\n tiles = model_classification.predict(tiles)\n tiles_pred = list(filter(lambda x: x[\"prediction\"] == 1, tiles))\n if len(tiles_pred) > 0:\n model_segmentation = Segmentation()\n # our tiles for prediction are at zoom 21\n result_tiles = model_segmentation.predict(tiles_pred, 21)\n for i, tile in enumerate(tiles):\n if tile[\"id\"] in result_tiles:\n tiles[i] = result_tiles[tile[\"id\"]]\n if \"mask_url\" in tiles[i]:\n tiles[i][\"mask_url\"] = f\"/{tiles[i]['mask_url']}\"\n return json.dumps(tiles)", "def main(parameters):\n metadata = get_metadata(parameters)\n # pprint(metadata)\n image_api = NswSatelliteImages(parameters, metadata)\n print('Zoom level:', image_api.zoom_level,\n 'Resolution:', image_api.resolution,\n 'Scale:', image_api.scale)\n image_api.download_tile(xtile=39000, ytile=60000)", "def get_tile(self, position):\n return self.tiles[position[x]][position[y]]", "def latToTile(lat, zoom):\n n = 2.0 ** zoom\n lat_rad = math.radians(lat)\n return (1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n", "def convert_tile(fname, out_fname, compression, filter_opts):\n with h5py.File(out_fname, 'w') as fid:\n with rasterio.open(fname) as ds:\n # global attributes\n attach_attributes(fid, ds.tags())\n\n # find and convert every subsdataset (sds)\n for sds_name in ds.subdatasets:\n with rasterio.open(sds_name) as sds:\n ds_name = Path(sds_name.replace(':', '/')).name\n\n # create empty or copy the user supplied filter options\n if not filter_opts:\n f_opts = dict()\n else:\n f_opts = filter_opts.copy()\n\n # use sds native chunks if none are provided\n if 'chunks' not in f_opts:\n f_opts['chunks'] = list(sds.block_shapes[0])\n\n # modify to have 3D chunks if we have a multiband sds\n if sds.count == 3:\n # something could go wrong if a user supplies\n # a 3D chunk eg (2, 256, 340)\n f_opts['chunks'].insert(0, 1)\n f_opts['chunks'] = tuple(f_opts['chunks'])\n else:\n f_opts['chunks'] = tuple(f_opts['chunks'])\n\n # subdataset attributes and spatial attributes\n attrs = sds.tags()\n attrs['geotransform'] = sds.transform.to_gdal()\n attrs['crs_wkt'] = sds.crs.wkt\n\n # ensure single band sds is read a 2D not 3D\n data = sds.read() if sds.count == 3 else sds.read(1)\n\n # write to disk as an IMAGE Class Dataset\n write_h5_image(data, ds_name, fid, attrs=attrs,\n compression=compression,\n filter_opts=f_opts)", "def getGoogleMap(self, lat, lng, wTileN, hTileN, zoom):\n start_x, start_y = self.getStartTlXY(lat, lng, zoom)\n width, height = 256 * wTileN, 256 * hTileN\n map_img = Image.new('RGB', (width, height))\n for x in range(0, wTileN):\n for y in range(0, hTileN):\n url = 'https://mt0.google.com/vt?x=' + \\\n str(start_x+x)+'&y='+str(start_y+y)+'&z='+str(zoom)\n current_tile = str(x)+'-'+str(y)\n urllib.request.urlretrieve(url, current_tile)\n map_img.paste(Image.open(current_tile), (x*256, y*256))\n os.remove(current_tile)\n return map_img", "def get_tile_info(file_info, img_info):\n all_tiles = []\n new_tiles = {}\n if img_info['invert_x']:\n xmin = img_info['width']*img_info['scale'] - img_info['viewer']['right']\n xmax = img_info['width']*img_info['scale'] - img_info['viewer']['left']\n else:\n xmin = img_info['viewer']['left']\n xmax = img_info['viewer']['right']\n if 
img_info['invert_y']:\n ymin = img_info['height']*img_info['scale'] - img_info['viewer']['bottom']\n ymax = img_info['height']*img_info['scale'] - img_info['viewer']['top']\n else:\n ymin = img_info['viewer']['top']\n ymax = img_info['viewer']['bottom']\n minCol = int(max(1,math.floor(xmin/file_info['tile_width'])))-1\n maxCol=int(min(img_info['columns'],math.ceil(xmax/file_info['tile_width'])))\n minRow = int(max(1,math.floor(ymin/file_info['tile_height'])))-1\n maxRow = int(min(img_info['rows'],math.ceil(ymax/file_info['tile_height'])))\n \n block_width = int(math.ceil(file_info['tile_width']/img_info['scale']))\n block_height = int(math.ceil(file_info['tile_height']/img_info['scale']))\n \n for row in range(minRow,maxRow):\n y0 = row*file_info['tile_height']\n yf = (row+1)*file_info['tile_height']\n y0_idx = int(y0/img_info['scale'])\n yf_idx = min(y0_idx + block_height, img_info['height'])\n for col in range(minCol,maxCol):\n all_tiles.append(str(col)+','+str(row))\n tile_idx = str(col)+','+str(row)\n if (tile_idx not in img_info['tiles'] or \n 'loaded' not in img_info['tiles'][tile_idx] or\n not img_info['tiles'][tile_idx]['loaded']):\n x0 = col*file_info['tile_width']\n xf = (col+1)*file_info['tile_width']\n x0_idx = int(x0/img_info['scale'])\n xf_idx = min(x0_idx+block_width, img_info['width'])\n tile_width = int((xf_idx-x0_idx)*img_info['scale'])\n tile_height = int((yf_idx-y0_idx)*img_info['scale'])\n new_filepath = get_tile_filename(\n file_info, img_info, x0_idx, xf_idx, y0_idx, yf_idx)\n tile = {\n 'idx': tile_idx,\n 'left': x0,\n 'right': xf,\n 'top': y0,\n 'bottom': yf,\n 'y0_idx': y0_idx,\n 'yf_idx': yf_idx,\n 'x0_idx': x0_idx,\n 'xf_idx': xf_idx,\n 'new_filepath': new_filepath,\n 'loaded': False,\n 'row': row,\n 'col': col,\n 'x': col*file_info['tile_width'],\n 'y': row*file_info['tile_height'],\n 'width': tile_width,\n 'height': tile_height\n }\n if img_info['invert_y']:\n tile['top'] = yf\n tile['bottom'] = y0\n if img_info['invert_x']:\n tile['left'] = xf\n tile['right'] = x0\n new_tiles[tile_idx] = tile\n print('viewer:', img_info['viewer'])\n print('new tiles', new_tiles.keys())\n return all_tiles, new_tiles", "def get_tile(self, point):\n print \"Getting tile for %s\" % repr(point)\n return self.matrix[point.y][point.x]", "def scn2tilecache(self, unq_id):\n if (self.tilecachePath is None) or (not os.path.exists(self.tilecachePath)):\n raise EODataDownException(\"The tilecache path does not exist or not provided, please create and run again.\")\n\n if not os.path.exists(self.ardProdTmpPath):\n raise EODataDownException(\"The tmp path does not exist, please create and run again.\")\n\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scene.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id).one_or_none()\n if query_result is not None:\n if not query_result.ARDProduct:\n raise EODataDownException(\"Cannot create a tilecache as an ARD product has not been created.\")\n if query_result.Invalid:\n raise EODataDownException(\"Cannot create a tilecache as image has been assigned as 'invalid'.\")\n\n scn_json = query_result.ExtendedInfo\n if (scn_json is None) or (scn_json == \"\"):\n scn_json = dict()\n\n ard_img_path = query_result.ARDProduct_Path\n eodd_utils = eodatadown.eodatadownutils.EODataDownUtils()\n ard_img_file = 
eodd_utils.findFile(ard_img_path, '*dB*.tif')\n\n out_tilecache_dir = os.path.join(self.tilecachePath,\n \"{}_{}\".format(query_result.Product_File_ID, query_result.PID))\n if not os.path.exists(out_tilecache_dir):\n os.mkdir(out_tilecache_dir)\n\n out_visual_gtiff = os.path.join(out_tilecache_dir,\n \"{}_{}_vis.tif\".format(query_result.Product_File_ID, query_result.PID))\n\n tmp_tilecache_path = os.path.join(self.ardProdTmpPath,\n \"tilecache_{}_{}\".format(query_result.Product_File_ID, query_result.PID))\n if not os.path.exists(tmp_tilecache_path):\n os.mkdir(tmp_tilecache_path)\n\n # VV, VH, VV/VH\n bands = '1,2,3'\n\n import rsgislib.tools.visualisation\n rsgislib.tools.visualisation.createWebTilesVisGTIFFImg(ard_img_file, bands, out_tilecache_dir,\n out_visual_gtiff, zoomLevels='2-12',\n img_stats_msk=None, img_msk_vals=1,\n stretch_file=self.std_vis_img_stch,\n tmp_dir=tmp_tilecache_path, webview=True,\n scale=50)\n\n if not (\"tilecache\" in scn_json):\n scn_json[\"tilecache\"] = dict()\n scn_json[\"tilecache\"][\"tilecachepath\"] = out_tilecache_dir\n scn_json[\"tilecache\"][\"visgtiff\"] = out_visual_gtiff\n query_result.ExtendedInfo = scn_json\n flag_modified(query_result, \"ExtendedInfo\")\n ses.add(query_result)\n ses.commit()\n else:\n raise EODataDownException(\"Could not find input image with PID {}\".format(unq_id))\n ses.close()\n logger.debug(\"Closed the database session.\")\n shutil.rmtree(tmp_tilecache_path)", "def update_tile(self, data):\n self.send(text_data=MyCache().get(tile_id=getRedisPrefix(data['tile_id'])))", "def get_info(self, level):\n\n # see if we can open the tile info file.\n info_file = os.path.join(self.tile_dir, '%02d' % level,\n self.TileInfoFilename)\n try:\n fd = open(info_file, 'rb')\n except IOError:\n return None\n\n # OK, looks like we actually do have this level!\n info = pickle.load(fd)\n fd.close()\n\n return info", "def get_tile_index_range(dataset_filename):\n dataset = gdal.Open(dataset_filename)\n assert dataset, 'Unable to open dataset %s' % dataset_filename\n spatial_reference = osr.SpatialReference()\n spatial_reference.ImportFromWkt(dataset.GetProjection())\n geotransform = dataset.GetGeoTransform()\n logger.debug('geotransform = %s', geotransform)\n# latlong_spatial_reference = spatial_reference.CloneGeogCS()\n tile_spatial_reference = osr.SpatialReference()\n s = re.match('EPSG:(\\d+)', tile_type_info['crs'])\n if s:\n epsg_code = int(s.group(1))\n logger.debug('epsg_code = %d', epsg_code)\n assert tile_spatial_reference.ImportFromEPSG(epsg_code) == 0, 'Invalid EPSG code for tile projection'\n else:\n assert tile_spatial_reference.ImportFromWkt(tile_type_info['crs']), 'Invalid WKT for tile projection'\n \n logger.debug('Tile WKT = %s', tile_spatial_reference.ExportToWkt())\n \n coord_transform_to_tile = osr.CoordinateTransformation(spatial_reference, tile_spatial_reference)\n # Upper Left\n xmin, ymax, _z = coord_transform_to_tile.TransformPoint(geotransform[0], geotransform[3], 0)\n # Lower Right\n xmax, ymin, _z = coord_transform_to_tile.TransformPoint(geotransform[0] + geotransform[1] * dataset.RasterXSize, \n geotransform[3] + geotransform[5] * dataset.RasterYSize, \n 0)\n \n logger.debug('Coordinates: xmin = %f, ymin = %f, xmax = %f, ymax = %f', xmin, ymin, xmax, ymax)\n\n return (int(floor((xmin - tile_type_info['x_origin']) / tile_type_info['x_size'])), \n int(floor((ymin - tile_type_info['y_origin']) / tile_type_info['y_size'])), \n int(ceil((xmax - tile_type_info['x_origin']) / tile_type_info['x_size'])), \n 
int(ceil((ymax - tile_type_info['y_origin']) / tile_type_info['y_size'])))", "def _parse_and_store_geojson(filename):\n ds = DataSource(filename)\n _sanity_check_datasource(ds)\n\n logger.info('Data file %s was opened', ds.name)\n lm = LayerMapping(WegStuk, ds, MAPPING)\n\n with transaction.atomic():\n WegStuk.objects.all().delete()\n lm.save(strict=True, verbose=False)\n\n logger.info('Travel time dataset was updated.')", "def load_tile(tile):\n return pygame.image.load(tile[\"states\"][\"default\"][0])", "def write_tiles_to_file(tiles, gfx_file, output_file=None):\n if output_file is None:\n output_file = gfx_file + '_edit'\n\n sorted_tiles = sorted(tiles, key=lambda tile: int(tile.address, 16))\n\n with open(gfx_file, 'rb') as gfx_reader:\n with open(output_file, 'wb') as f:\n for tile in sorted_tiles:\n converted_addr = convert_mame_addr(tile.address, tile.dimensions)\n read_length = converted_addr - gfx_reader.tell()\n if read_length == 128:\n gfx_reader.seek(read_length, 1)\n f.write(tile.data)\n else:\n unchanged_gfx = gfx_reader.read(read_length)\n f.write(unchanged_gfx)\n gfx_reader.seek(128, 1)\n f.write(tile.data)\n\n final_read = gfx_reader.read()\n f.write(final_read)", "def generate_base_tiles(self):\n\n if not self.options.quiet:\n print(\"Generating Base Tiles:\")\n\n if self.options.verbose:\n print('')\n print(\"Tiles generated from the max zoom level:\")\n print(\"----------------------------------------\")\n print('')\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n\n ds = self.warped_input_dataset\n tilebands = self.dataBandsCount + 1\n querysize = self.querysize\n\n if self.options.verbose:\n print(\"dataBandsCount: \", self.dataBandsCount)\n print(\"tilebands: \", tilebands)\n\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n ti = 0\n\n tile_details = []\n\n tz = self.tmaxz\n for ty in range(tmaxy, tminy-1, -1):\n for tx in range(tminx, tmaxx+1):\n\n ti += 1\n ytile = GDAL2Tiles.getYtile(ty, tz, self.options)\n tilefilename = os.path.join(\n self.output_folder, str(tz), '{0:04d}'.format(tx) + \"_\" + '{0:04d}'.format(ytile) + \".\" + self.tileext)\n if self.options.verbose:\n print(ti, '/', tcount, tilefilename)\n\n if self.options.resume and os.path.exists(tilefilename):\n if self.options.verbose:\n print(\"Tile generation skipped because of --resume\")\n continue\n\n # Create directories for the tile\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:3857\n b = self.mercator.TileBounds(tx, ty, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty, tz)\n\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n\n if self.options.profile in ('mercator', 'geodetic'):\n rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])\n\n # Pixel size in the raster covering query geo extent\n nativesize = wb[0] + wb[2]\n if self.options.verbose:\n print(\"\\tNative Extent (querysize\", nativesize, \"): \", rb, wb)\n\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n\n else: # 'raster' profile:\n\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels\n ysize = 
self.warped_input_dataset.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty * tsize) - rysize\n\n wx, wy = 0, 0\n wxsize = int(rxsize/float(tsize) * self.tilesize)\n wysize = int(rysize/float(tsize) * self.tilesize)\n if wysize != self.tilesize:\n wy = self.tilesize - wysize\n\n # Read the source raster if anything is going inside the tile as per the computed\n # geo_query\n tile_details.append(\n TileDetail(\n tx=tx, ty=ytile, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,\n wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,\n )\n )\n\n conf = TileJobInfo(\n src_file=self.tmp_vrt_filename,\n nb_data_bands=self.dataBandsCount,\n output_file_path=self.output_folder,\n tile_extension=self.tileext,\n tile_driver=self.tiledriver,\n tile_size=self.tilesize,\n kml=self.kml,\n tminmax=self.tminmax,\n tminz=self.tminz,\n tmaxz=self.tmaxz,\n in_srs_wkt=self.in_srs_wkt,\n out_geo_trans=self.out_gt,\n ominy=self.ominy,\n is_epsg_4326=self.isepsg4326,\n options=self.options,\n )\n\n return conf, tile_details", "def setTile(self, cell, tile):\n assert isinstance(cell, tuple)\n cellx, celly = cell\n\n if cellx < 0 or cellx > self.map_array.shape[0]-1 or celly < 0 or celly > self.map_array.shape[1]-1:\n return\n\n if self.tile_dict.get((cellx, celly)):\n self.canvas.delete(self.tile_dict[(cellx, celly)])\n\n if tile:\n self.map_array[cellx, celly] = tile.tid\n if tile.tid == 0.0:\n return\n map_posx, map_posy = iso(cellx * self.cell_width, celly * self.cell_height)\n image = self.main.main_tilelist.images[tile.tid]\n self.tile_dict[(cellx, celly)] = self.canvas.create_image(map_posx, map_posy, image=image, anchor=tk.N)", "def get_our_tile(self, x, y):\n\t\tif x >= 0 and x < self.w and y >= 0 and y < self.h:\n\t\t\treturn self.our_tiles[x][y]\n\t\treturn None" ]
[ "0.7046244", "0.6987471", "0.6816313", "0.6353406", "0.6337127", "0.63041425", "0.6291108", "0.6263484", "0.6197", "0.6188584", "0.6176481", "0.6158082", "0.60673577", "0.60509396", "0.60040617", "0.58420604", "0.5838847", "0.58381325", "0.5794229", "0.5779506", "0.57553935", "0.57083213", "0.5698197", "0.56559926", "0.56326807", "0.562034", "0.5591855", "0.5576654", "0.55550414", "0.55212045", "0.54587936", "0.5446433", "0.54421127", "0.540441", "0.53915244", "0.538882", "0.53665346", "0.5346257", "0.53426003", "0.5309626", "0.53003734", "0.53001004", "0.529162", "0.52749884", "0.5239406", "0.5234195", "0.5234194", "0.52210194", "0.5213782", "0.52040315", "0.51787907", "0.51730263", "0.5155762", "0.5116925", "0.51139814", "0.5112248", "0.51056176", "0.5098458", "0.5091005", "0.5087229", "0.5084313", "0.5070887", "0.50700605", "0.50675005", "0.50629526", "0.5035066", "0.502218", "0.5006372", "0.4997047", "0.49872452", "0.49866164", "0.49770626", "0.49744636", "0.4973446", "0.49720544", "0.4971229", "0.49707133", "0.49425304", "0.49380884", "0.49372208", "0.49358144", "0.4920816", "0.4917233", "0.48951998", "0.48939764", "0.48787954", "0.4864047", "0.48626003", "0.48479396", "0.48441267", "0.48294622", "0.48169076", "0.48088548", "0.47972485", "0.47899836", "0.47881636", "0.47817352", "0.4771957", "0.47712204", "0.47700742" ]
0.7838271
0
Save the tiles whose coordinates are in the input DataFrame, defined by columns x, y, and z
def save_tiles(df,output_dir,namefunc = None):
    if not isinstance(df,pd.core.frame.DataFrame):
        raise TypeError("df must be a pandas DataFrame!")
    if any(e not in df.columns for e in ('z','x','y')):
        raise ValueError("df must have columns x, y, and z")
    if namefunc is None:
        def namefunc(x,y,z):
            return f'{z}_{x}_{y}.png'
    opath = os.path.abspath(os.path.expanduser(output_dir))
    Path(opath).mkdir(parents=True, exist_ok=True)
    L = df.shape[0]
    flocs = [''] * L
    for i,xyz in enumerate(zip(df['x'],df['y'],df['z'])):
        x,y,z = xyz
        print(f"({i+1} of {L})...")
        sleep(0.75)
        outloc = os.path.join(opath,namefunc(x,y,z))
        if save_tile(x,y,z,outloc) == 0:
            flocs[i] = outloc
    df = df.assign(file_loc = flocs)
    return df[df['file_loc'] != '']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_coords(self, coords, output_dir):\n new_rows = []\n for i, (lat, lon) in enumerate(coords):\n row = {\n 'tile_id': i,\n 'lat':lat,\n 'long':lon,\n 'side_length': self.side_len \n }\n\n new_rows.append(row)\n\n coord_df = pd.DataFrame(new_rows)\n coord_df.to_csv(f\"{output_dir}/coordinate_map.csv\", index=False)\n print(\"done saving coordinates!\")", "def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))", "def batch_save_tile_mask(tiles_gdf, label_poly_series, tile_size, region, zone, save_path, channels=3, display=False):\n \n import warnings; warnings.simplefilter('ignore')\n\n for idx, tile in tqdm(tiles_gdf.iterrows()):\n dataset = tile['dataset']\n tile_poly = get_specific_tile(idx, tiles_gdf)\n save_tile_mask(label_poly_series, tile_poly, tile['xyz'], tile_size, dataset,\n region, zone, save_path, channels, display)", "def find_tiles(x_index = None, y_index = None):\n db_cursor2 = self.db_connection.cursor()\n\n sql = \"\"\"-- Check for any existing tiles\nselect\n tile_id,\n x_index,\n y_index,\n tile_type_id,\n tile_pathname,\n dataset_id,\n tile_class_id,\n tile_size\nfrom tile_footprint\ninner join tile using(x_index, y_index, tile_type_id)\nwhere (%(x_index)s is null or x_index = %(x_index)s)\n and (%(y_index)s is null or y_index = %(y_index)s)\n and tile_type_id = %(tile_type_id)s\n and dataset_id = %(fc_dataset_id)s\n\n and ctime is not null -- TODO: Remove this after reload\n;\n\"\"\"\n params = {'x_index': x_index,\n 'y_index': y_index,\n 'tile_type_id': tile_type_info['tile_type_id'],\n 'fc_dataset_id': dataset_info['fc_dataset_id']}\n \n log_multiline(logger.debug, db_cursor2.mogrify(sql, params), 'SQL', '\\t')\n db_cursor2.execute(sql, params)\n tile_info = {}\n for record in db_cursor2:\n tile_info_dict = {\n 'x_index': record[1],\n 'y_index': record[2],\n 'tile_type_id': record[3],\n 'tile_pathname': record[4],\n 'dataset_id': record[5],\n 'tile_class_id': record[6],\n 'tile_size': record[7]\n }\n tile_info[record[0]] = tile_info_dict # Keyed by tile_id\n \n log_multiline(logger.debug, tile_info, 'tile_info', '\\t')\n return tile_info", "def to_xyz_tiles(\n self, root: str, tile_size: int, zoom_levels: list, driver=\"GTiff\", **kwargs\n ):\n mName = os.path.normpath(os.path.basename(root))\n\n def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n def tile_window(shape, px):\n \"\"\"Yield (left, upper, width, height).\"\"\"\n nr, nc = shape\n lu = product(range(0, nc, px), range(0, nr, px))\n\n ## create the window\n for l, u in lu:\n h = min(px, nr - u)\n w = min(px, nc - l)\n yield (l, u, w, h)\n\n vrt_fn = None\n prev = 0\n nodata = self.nodata\n obj = self._obj.copy()\n zls = {}\n for zl in zoom_levels:\n diff = zl - prev\n pxzl = tile_size * (2 ** (diff))\n\n # read data from previous zoomlevel\n if vrt_fn is not None:\n obj = xr.open_dataarray(vrt_fn, engine=\"rasterio\").squeeze(\n \"band\", drop=True\n )\n x_dim, y_dim = obj.raster.x_dim, obj.raster.y_dim\n obj = obj.chunk({x_dim: pxzl, y_dim: pxzl})\n dst_res = abs(obj.raster.res[-1]) * (2 ** (diff))\n\n if 
pxzl > min(obj.shape):\n logger.warning(\n f\"Tiles at zoomlevel {zl} smaller than tile_size {tile_size}\"\n )\n\n # Write the raster paths to a text file\n sd = join(root, f\"{zl}\")\n create_folder(sd)\n txt_path = join(sd, \"filelist.txt\")\n file = open(txt_path, \"w\")\n\n for l, u, w, h in tile_window(obj.shape, pxzl):\n col = int(np.ceil(l / pxzl))\n row = int(np.ceil(u / pxzl))\n ssd = join(sd, f\"{col}\")\n\n create_folder(ssd)\n\n # create temp tile\n temp = obj[u : u + h, l : l + w]\n if zl != 0:\n temp = temp.coarsen(\n {x_dim: 2**diff, y_dim: 2**diff}, boundary=\"pad\"\n ).mean()\n temp.raster.set_nodata(nodata)\n\n if driver == \"netcdf4\":\n path = join(ssd, f\"{row}.nc\")\n temp = temp.raster.gdal_compliant()\n temp.to_netcdf(path, engine=\"netcdf4\", **kwargs)\n elif driver in gis_utils.GDAL_EXT_CODE_MAP:\n ext = gis_utils.GDAL_EXT_CODE_MAP.get(driver)\n path = join(ssd, f\"{row}.{ext}\")\n temp.raster.to_raster(path, driver=driver, **kwargs)\n else:\n raise ValueError(f\"Unkown file driver {driver}\")\n\n file.write(f\"{path}\\n\")\n\n del temp\n\n file.close()\n # Create a vrt using GDAL\n vrt_fn = join(root, f\"{mName}_zl{zl}.vrt\")\n gis_utils.create_vrt(vrt_fn, file_list_path=txt_path)\n prev = zl\n zls.update({zl: float(dst_res)})\n del obj\n\n # Write a quick data catalog yaml\n yml = {\n \"crs\": self.crs.to_epsg(),\n \"data_type\": \"RasterDataset\",\n \"driver\": \"raster\",\n \"path\": f\"{mName}_zl{{zoom_level}}.vrt\",\n \"zoom_levels\": zls,\n }\n with open(join(root, f\"{mName}.yml\"), \"w\") as f:\n yaml.dump({mName: yml}, f, default_flow_style=False, sort_keys=False)", "def xyz_from_grid(x,y,z, pnts_out):\n\tx_flt=x.flatten()\n\ty_flt=y.flatten()[::-1]\n\tz_flt=z.flatten()\n\n\tutil.check_output_dir(pnts_out)\n\tfout = open(pnts_out, 'w')\n\tfout.write(\"x,y,z\\n\")\n\n\tprint(\"Writing out %i xyz triples to %s\" %(len(z_flt),pnts_out))\n\tfor i in range(0, len(z_flt)):\n\t\tif not np.isnan(z_flt[i]):\n\t\t\tfout.write(\"%.6f,%.6f,%.2f\\n\" %(x_flt[i], y_flt[i], z_flt[i]))\n\n\tfout.close()", "def savez(d,file):\n np.savez(file,row=d.row,col=d.col,data=d.data,shape=d.shape)", "def shapely_tileset(processed_query,min_ovp = 0,max_ovp = 1,\n n_neg = None,buffer = 0):\n types, xx, yy, qual, tags = [],[],[],[],[]\n z = processed_query['zoom']\n for elem in processed_query['elements']:\n for tile in elem['tiles']:\n qq = tile[1]\n if qq >= min_ovp and qq <= max_ovp:\n x,y,_ = find_tile_coords(tile[0],z)\n xx.append(x)\n yy.append(y)\n qual.append(tile[1])\n tags.append(json.dumps(elem['tags']))\n types.append(elem['type'])\n \n pos_df = pd.DataFrame({\n 'z': z, 'x' : xx, 'y': yy, \n 'entity': types,\n 'overlap': qual,'tags': tags,\n 'placename': processed_query['query_info']['placename']\n }) \\\n .drop_duplicates(subset = ['x','y']) \\\n .sort_values(by = ['x','y'])\n if n_neg is None: n_neg = pos_df.shape[0]\n negt = sample_complement(pos_df['x'],pos_df['y'],n_neg,buffer)\n neg_df = pd.DataFrame({'z': z,'x': negt[0],'y': negt[1]}) \\\n .sort_values(by = ['x','y'])\n return { \n 'positive': add_latlon(pos_df),\n 'negative': add_latlon(neg_df)\n }", "def batch_save_tile_img(tiles_gdf, tif, tile_size, region, zone, save_path, display=False):\n for idx, tile in tqdm(tiles_gdf.iterrows()):\n dataset = tile['dataset']\n save_tile_img(tif, tile['xyz'], dataset, tile_size, region, zone, save_path, display=False)", "def save(self,filepath):\n d = self.X.tocoo(copy=False)\n v = self.col_view.tocoo(copy=False)\n 
np.savez(filepath,row=d.row,col=d.col,data=d.data,shape=d.shape,\n v_row=v.row,v_col=v.col,v_data=v.data,v_shape=v.shape)", "def basic_tileset(geo_dict, zooms, buffer = 0,n_neg = None):\n if not len(geo_dict['elements']):\n raise ValueError(\"The query is empty - cannot continue!\")\n if type(zooms) is int:\n zooms = [zooms]\n if any(z < 2 or z > 19 for z in zooms):\n raise ValueError(\"all zoom levels must be between 2 and 19\")\n \n nodes = atomize_features(geo_dict)\n points_list = [(node['lat'],node['lon']) for node in nodes]\n pos_DFs, neg_DFs = [], []\n\n for zoom in zooms:\n\n zxy = [(zoom,*deg2num(x,y,zoom)) for x,y in points_list]\n pos_df = pd.DataFrame.from_records(zxy,columns = ['z','x','y'])\\\n .drop_duplicates(subset = ['x','y'])\n num_neg = pos_df.shape[0] if n_neg is None else int(n_neg)\n neg_x, neg_y = sample_complement(pos_df['x'],pos_df['y'],num_neg,buffer)\n neg_df = pd.DataFrame({'z': zoom,'x': neg_x,'y': neg_y}).sort_values(by = ['z','x','y'])\n pos_DFs.append(pos_df)\n neg_DFs.append(neg_df)\n \n out_pos = add_latlon(pd.concat(pos_DFs,axis = 0))\n out_neg = add_latlon(pd.concat(neg_DFs,axis = 0))\n\n common_row = pd.merge(out_pos,out_neg,on = ['z','x','y']).shape[0]\n if common_row > 0:\n raise RuntimeError(f\"Somehow there are {common_row} common rows!\")\n return {'positive': out_pos, 'negative': out_neg }", "def save_tile_img(tif, xyz, dataset, tile_size, region, zone, save_path, display=False):\n \n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tile, mask = rt_main.tile(tif, x,y,z, tilesize=tile_size)\n if display: \n plt.imshow(np.moveaxis(tile,0,2))\n plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}.png',np.moveaxis(tile,0,2), check_contrast=False)", "def read_cells(filename):\n\n import pandas as pd\n\n min_x = -1.77\n min_y = 174.0\n min_z = -183.0\n\n size_x = 0.972\n size_y = 3.69\n size_z = 0.976\n\n frame = pd.read_csv(filename, skiprows=3)\n # frame = pd.read_csv(filename)\n\n# print(\"X range:\",min(frame['Position X']), max(frame['Position X']), \"dynamic range:\", max(frame['Position X'])-min(frame['Position X']))\n# print(\"Y range:\",min(frame['Position Y']), max(frame['Position Y']), \"dynamic range:\", max(frame['Position Y'])-min(frame['Position Y']))\n# print(\"Z range:\",min(frame['Position Z']), max(frame['Position Z']), \"dynamic range:\", max(frame['Position Z'])-min(frame['Position Z']))\n#\n # will need to check IMARIS for correspondence between exported um files and pixel values\n # X and Z on csv files are my X and Y on resliced images\n\n frame[\"Pixel X\"] = (frame['Position X'] - min_x) / size_x\n frame[\"Pixel X\"] = frame[\"Pixel X\"].round().astype(int)\n\n frame[\"Pixel Y\"] = (frame['Position Z'] - min_z) / size_z\n frame[\"Pixel Y\"] = frame[\"Pixel Y\"].round().astype(int)\n\n frame[\"Pixel Z\"] = (frame['Position Y'] - min_y) / size_y\n frame[\"Pixel Z\"] = frame[\"Pixel Z\"].round().astype(int)\n\n print(\"X pixel range:\", min(frame[\"Pixel X\"]), max(\n frame[\"Pixel X\"]), \"dynamic range:\", max(frame[\"Pixel X\"]) - min(frame[\"Pixel X\"]))\n print(\"Y pixel range:\", min(frame[\"Pixel Y\"]), max(\n frame[\"Pixel Y\"]), \"dynamic range:\", max(frame[\"Pixel Y\"]) - min(frame[\"Pixel Y\"]))\n print(\"Z pixel range:\", min(frame[\"Pixel Z\"]), max(\n frame[\"Pixel Z\"]), \"dynamic range:\", max(frame[\"Pixel Z\"]) - min(frame[\"Pixel Z\"]))\n# print(frame)\n frame.to_csv(\"frame.csv\")\n return frame", "def save_tile_mask(label_poly_series, tile_poly, xyz, tile_size, dataset, region, zone, 
save_path, channels = 3, display=False):\n \n \n\n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tfm = from_bounds(*tile_poly.bounds, tile_size, tile_size) \n \n cropped_polys = [poly for poly in label_poly_series if poly.intersects(tile_poly)]\n cropped_polys_gdf = gpd.GeoDataFrame(geometry=cropped_polys, crs={'init': 'epsg:4326'})\n \n fbc_mask = burn_mask(cropped_polys_gdf, tfm, tile_size, channels)\n # fbc_mask = sol.vector.mask.df_to_px_mask(df=cropped_polys_gdf,\n # channels=['footprint', 'boundary', 'contact'],\n # affine_obj=tfm, shape=(tile_size,tile_size),\n # boundary_width=5, boundary_type='inner', contact_spacing=5, meters=True)\n \n if display: \n plt.imshow(fbc_mask); plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}_mask.png',fbc_mask, check_contrast=False)", "def save_tile(x,y,z,fpath):\n UA = \"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/77.0\"\n tile_url = f\"https://{random.choice('abc')}.tile.openstreetmap.org/{z}/{x}/{y}.png\"\n # cmd = f\"wget --user-agent='please download' -O {fpath} {url}\"\n if os.path.exists(fpath):\n print(f\"Already have tile {fpath}!\")\n return 0\n if os.path.isdir(fpath):\n raise ValueError(f\"requested path {fpath} exists and is a directory!\")\n try:\n res = rq.get(\n url=tile_url,\n headers={'User-Agent': UA}\n )\n status = res.status_code\n if status == 200:\n with open(fpath,'wb') as of:\n of.write(res.content)\n return 0\n else:\n print(f\"Error: response {status} from server:\\n{res.reason}\")\n return status\n except Exception as e:\n print(f\"Error getting tile: {e}\")\n return 1", "def write_towhee_coord(self, filename):\n with open(filename, 'w') as f:\n df = self.contents[['X', 'Y', 'Z']].copy()\n np.savetxt(f, df.values, fmt=\" %20.15f\"*3)", "def tile(self, z, x, y):\n logger.debug(_(\"Render tile %s\") % ((z, x, y),))\n mercator = GlobalMercator(False,tilesize,[z])\n return self.render(mercator.tile_bbox((z, x, y)))", "def positions(self, tileID, numSamples):", "def grid(self, z, x, y, fields, layer):\n logger.debug(_(\"Render grid %s\") % ((z, x, y),))\n mercator = GlobalMercator(False,self.tilesize,[z])\n return self.render_grid(mercator.tile_bbox((z, x, y)), fields, layer)", "def save_geotiff(df, data_col, crs, x_col='x', y_col='y', time_col=None, nfiles='many', export_path='geotiff.tif', grid_res=None):\n\n ### create the xy coordinates\n if time_col is None:\n xy1 = df[[x_col, y_col]]\n else:\n time = df[time_col].sort_values().unique()\n xy1 = df.loc[df[time_col] == time[0], [x_col, y_col]]\n if any(xy1.duplicated()):\n raise ValueError('x and y coordinates are not unique!')\n\n ### Determine grid res\n if grid_res is None:\n res_df1 = (xy1.loc[0] - xy1).abs()\n res_df2 = res_df1.replace(0, nan).min()\n x_res = res_df2[x_col]\n y_res = res_df2[y_col]\n elif isinstance(grid_res, int):\n x_res = y_res = grid_res\n else:\n raise ValueError('grid_res must either be None or an integer.')\n\n ### Make the affline transformation for Rasterio\n trans2 = transform.from_origin(xy1[x_col].min() - x_res/2, xy1[y_col].max() + y_res/2, x_res, y_res)\n\n ### Make the rasters\n if time_col is None:\n z = df.set_index([y_col, x_col])[data_col].unstack().values[::-1]\n new_dataset = ras_open(export_path, 'w', driver='GTiff', height=len(xy1[y_col].unique()), width=len(xy1[x_col].unique()), count=1, dtype=df[data_col].dtype, crs=convert_crs(crs, pass_str=True), transform=trans2)\n new_dataset.write(z, 1)\n new_dataset.close()\n else:\n if nfiles == 'one':\n new_dataset = 
ras_open(export_path, 'w', driver='GTiff', height=len(xy1[y_col].unique()), width=len(xy1[x_col].unique()), count=len(time), dtype=df[data_col].dtype, crs=convert_crs(crs), transform=trans2)\n for i in range(1, len(time)+1):\n z = df.loc[df[time_col] == time[i - 1]].set_index([y_col, x_col])[data_col].unstack().values[::-1]\n new_dataset.write(z, i)\n new_dataset.close()\n elif nfiles == 'many':\n file1 = path.splitext(export_path)[0]\n for i in time:\n str_date = to_datetime(i).strftime('%Y-%m-%d_%H')\n file2 = file1 + '_' + str_date + '.tif'\n z = df.loc[df[time_col] == i].set_index([y_col, x_col])[data_col].unstack().values[::-1]\n new_dataset = ras_open(file2, 'w', driver='GTiff', height=len(xy1[y_col].unique()), width=len(xy1[x_col].unique()), count=1, dtype=df[data_col].dtype, crs=convert_crs(crs), transform=trans2)\n new_dataset.write(z, 1)\n new_dataset.close()", "def write_base_tile(self, tx, ty, tz, xyzzy):\n\n data_bands = range(1, self.data_bands_count+1)\n data = self.out_ds.ReadRaster(xyzzy.rx, xyzzy.ry, xyzzy.rxsize, xyzzy.rysize,\n xyzzy.wxsize, xyzzy.wysize, band_list=data_bands)\n\n image_format = self.get_base_tile_format(tx, ty, tz, xyzzy)\n\n if image_format is None:\n return\n else:\n num_bands = self.get_num_bands(image_format)\n\n if self.verbose:\n print \"\\tReadRaster Extent: \", (xyzzy.rx, xyzzy.ry, xyzzy.rxsize, xyzzy.rysize),\n print 'z =',tz,' ; x =',tx,' ; y =',ty, (xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize)\n\n dstile = self.mem_drv.Create('', self.tile_size, self.tile_size, num_bands)\n\n path = self.get_full_path(tx, ty, tz, format_extension[image_format])\n\n # Query is in 'nearest neighbour' but can be bigger in then the tilesize\n # We scale down the query to the tilesize by supplied algorithm.\n if self.tile_size == xyzzy.querysize:\n self.fill_init_dest(dstile)\n\n # Use the ReadRaster result directly in tiles ('nearest neighbour' query)\n dstile.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, data, band_list=data_bands)\n if image_format == \"PNG\":\n dstile.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, self.alpha, band_list=[num_bands])\n\n gdal_write(path, dstile, image_format)\n\n # Note: For source drivers based on WaveLet compression (JPEG2000, ECW, MrSID)\n # the ReadRaster function returns high-quality raster (not ugly nearest neighbour)\n # TODO: Use directly 'near' for WaveLet files\n else:\n # Big ReadRaster query in memory scaled to the tilesize - all but 'near' algo\n dsquery = self.mem_drv.Create('', xyzzy.querysize, xyzzy.querysize, num_bands)\n self.fill_init_dest(dsquery)\n\n dsquery.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, data, band_list=data_bands)\n if image_format == \"PNG\":\n dsquery.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, self.alpha,band_list=[num_bands])\n\n self.resampler(path, dsquery, dstile, image_format)\n\n self.alpha = None", "def store(self, dataFrame, filename):\n columns = [\"longitude\", \"latitude\", \"elevation\", \"noise_mean_day\", \"noise_mean_evening\", \"noise_mean_night\", \"noise_weighted_24h\", \"noise_mean_24h\"]\n self.store_in_csv(dataFrame, filename=filename, columns=columns)\n\n columns.insert(0, \"id\") # pandas adds a id in the front\n self.store_in_database(filename=filename, columns=columns)", "def query_image_tile(self, coord):", "def save_tile_data(tile_summary):\n\n time = Time()\n\n csv = summary_title(tile_summary) + \"\\n\" + summary_stats(tile_summary)\n\n csv += \"\\n\\n\\nTile Num,Row,Column,Tissue %,Tissue 
Quantity,Col Start,Row Start,Col End,Row End,Col Size,Row Size,\" + \\\n \"Color Factor,S and V Factor,Quantity Factor,Score\\n\"\n\n for t in tile_summary.tiles:\n line = \"%d,%d,%d,%4.2f,%s,%d,%d,%d,%d,%d,%d,%4.0f,%4.2f,%4.2f,%0.4f\\n\" % (\n t.tile_num, t.r, t.c, t.tissue_percentage, t.tissue_quantity().name, t.c_s, t.r_s, t.c_e, t.r_e, t.c_e - t.c_s,\n t.r_e - t.r_s,t.color_factor,\n t.s_and_v_factor, t.quantity_factor, t.score)\n csv += line\n\n data_path = slide.get_tile_data_path(tile_summary.slide_name)\n csv_file = open(data_path, \"w\")\n csv_file.write(csv)\n csv_file.close()\n\n print(\"%-20s | Time: %-14s Name: %s\" % (\"Save Tile Data\", str(time.elapsed()), data_path))", "def write_datastore(df, data_store, columns=None):\n if columns is None:\n columns = df.columns\n if 'hemisphere' in columns:\n columns.remove('hemisphere')\n df = df.sort_index()\n df[columns].to_pickle(data_store)\n log.info('saved data store: {}'.format(data_store))", "def output_grid_information():\n # translate = [-74.26, 40.50]\n # scale = [0.02, 0.02]\n # step = 1\n\n translate = [0, 0]\n scale = [1, 1]\n step = 0.02\n\n lon_limits = [(-74.26 - translate[0]) / scale[0], (-73.76 - translate[0]) / scale[0]]\n lat_limits = [(40.48 - translate[1]) / scale[1], (40.94 - translate[1]) / scale[1]]\n\n lons = np.arange(lon_limits[0], lon_limits[1] - step, step)\n lats = np.arange(lat_limits[0], lat_limits[1] - step, step)\n\n all_json = {\n \"type\": \"FeatureCollection\"\n }\n\n gr_id = 0\n grid_df = pd.DataFrame(columns=['gr_id', 'c_lat', 'c_lon', 's_lon', 'w_lat', 'n_lon', 'e_lat'])\n features = []\n\n for lat in lats:\n for lon in lons:\n w_lon = lon\n e_lon = lon + step\n s_lat = lat\n n_lat = lat + step\n\n c_lon = lon + step / 2\n c_lat = lat + step / 2\n\n grid_df = grid_df.append(pd.DataFrame({\"gr_id\": [gr_id],\n \"c_lon\": [c_lon], \"c_lat\": [c_lat],\n \"w_lon\": [w_lon], \"s_lat\": [s_lat],\n \"e_lon\": [e_lon], \"n_lat\": [n_lat]}))\n\n coor = [[[s_lat, w_lon], [n_lat, w_lon], [n_lat, e_lon],\n [s_lat, e_lon], [s_lat, w_lon]]]\n\n feature = {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": coor\n },\n \"properties\": {\n \"id\": str(gr_id)\n }\n }\n\n features.append(feature)\n\n gr_id += 1\n\n all_json['features'] = features\n\n with open(BaseDir + '/grid.geojson', 'w') as f:\n json.dump(all_json, f)\n\n grid_df.to_csv(BaseDir + '/grid_locs.csv', index=False)", "def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in 
range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)", "def find_tile(loc, dir):\n #returns the integer tile number\n \n # should be looking in the directory with supergrid data (probably \"fix\" directory)\n filename_pattern = '*grid.tile*.nc'\n \n #find all supergrid files in the directory\n grid_fnames = []\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n grid_fnames.append(f_name)\n if not grid_fnames:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n #non-polar tiles can use traditional 2D point-in-polygon methods; if a point is not in a non-polar tile,\n #it is in one of the polar tiles, and the tile can be distinguished by the sign of latitude of the point\n polar_tile_filenames = []\n found_tile = False\n for f_name in grid_fnames:\n if not found_tile:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n longitude = np.array(nc_file['x']).swapaxes(0,1)\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n \n #get lon/lat pairs for all edges of the tiles\n \n edge_1_lon = longitude[0,:]\n edge_1_lat = latitude[0,:]\n edge_1 = list(zip(edge_1_lon, edge_1_lat))\n \n edge_2_lon = longitude[:,-1]\n edge_2_lat = latitude[:,-1]\n edge_2 = list(zip(edge_2_lon, edge_2_lat))\n \n edge_3_lon = longitude[-1,:]\n edge_3_lat = latitude[-1,:]\n edge_3 = list(zip(edge_3_lon, edge_3_lat))\n edge_3.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n edge_4_lon = longitude[:,0]\n edge_4_lat = latitude[:,0]\n edge_4 = list(zip(edge_4_lon, edge_4_lat))\n edge_4.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n polygon_points = edge_1 + edge_2 + 
edge_3 + edge_4\n \n tile_polygon = Polygon(polygon_points)\n tile_polygon = tile_polygon.simplify(0)\n \n if tile_polygon.is_valid: #this will be True unless the tile is a polar tile, which will not form a regular polygon in Cartesian space using lon/lat data\n temp_loc = copy.deepcopy(loc)\n if adj_long:\n if loc[0] < 180:\n temp_loc[0] += 360\n loc_point = Point(temp_loc)\n if tile_polygon.contains(loc_point):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n else:\n polar_tile_filenames.append(f_name)\n \n #if the tile hasn't been found by this point, it must be contained within a polar tile\n for f_name in polar_tile_filenames:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n #if the sign of the mean latitude of the tile is the same as that of the point, the tile has been found\n if np.sign(np.mean(latitude)) == np.sign(loc[1]):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n return -1", "def save_GRID( self , filename ):\n self._fwrite_GRID( filename )", "def save_tiles(self, tiles, output_dir):\n save_path = f\"{output_dir}/tiles.npy\"\n tiles_np = np.asarray(tiles)\n np.save(save_path, tiles_np)\n print(\"done saving .npy!\")", "def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();", "def save_grd(filename, meta, map):\n if os.path.exists(filename):\n raise ValueError(\"File already exists: {}\".format(filename))\n if map.shape != (meta['NX'], meta['NY'], meta['NCOMP']):\n raise ValueError(\"The map shape does not match the metadata dictionary.\")\n points = meta['NX'] * meta['NY']\n components = meta['NCOMP']\n data = np.empty((points, 2 * components))\n for component in range(components):\n data[:, 2 * component] = map[:, :, component].reshape(points, order='F').real\n data[:, 2 * component + 1] = map[:, :, component].reshape(points, order='F').imag\n with open(filename, 'w') as f:\n for line in meta['header']:\n f.write('{}\\n'.format(line))\n f.write('{:2d}\\n'.format(meta['KTYPE']))\n f.write('{:12d}{:12d}{:12d}{:12d}\\n'.format(meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID']))\n f.write('{:12d}{:12d}\\n'.format(meta['IX'], meta['IY']))\n f.write(' {: 0.10E} {: 0.10E} {: 0.10E} {: 0.10E}\\n'.format(meta['XS'], meta['YS'], meta['XE'], meta['YE']))\n f.write('{:12d}{:12d}{:12d}\\n'.format(meta['NX'], meta['NY'], meta['KLIMIT']))\n for p in range(points):\n f.write(''.join([float_to_string(number) for number in data[p, :]]) + '\\n')", "def renderMetaTile(z, x, y, ntiles, hypsoreliefMap, landcoverreliefMap, areasMap, oceanMap, contoursMap, featuresMap):\n hypsorelief = renderLayer('hypsorelief', z, x, y, ntiles, hypsoreliefMap, 'png')\n landcoverrelief = renderLayer('landcoverrelief', z, x, y, ntiles, landcoverreliefMap, 'png')\n areas = renderLayer('areas', z, x, y, ntiles, areasMap, 'png')\n ocean = renderLayer('ocean', z, x, y, ntiles, oceanMap, 'png', True)\n contours = renderLayer('contours', z, x, y, ntiles, contoursMap, 'png', True)\n features = renderLayer('features', z, x, y, ntiles, featuresMap, 'png', True)\n base_h = getComposite((hypsorelief, areas, ocean))\n base_l = getComposite((landcoverrelief, ocean))\n composite_h = getComposite((base_h, contours, features))\n composite_l = getComposite((base_l, contours, features))\n saveTiles(z, x, y, ntiles, 'composite_h', composite_h)\n saveTiles(z, x, y, ntiles, 'composite_l', composite_l)\n if 
SAVE_JPEG_COMPOSITE:\n basename = 'jpeg' + str(JPEG_COMPOSITE_QUALITY)\n saveTiles(z, x, y, ntiles, basename+'_h', composite_h, 'jpg', basename)\n saveTiles(z, x, y, ntiles, basename+'_l', composite_l, 'jpg', basename)\n if SAVE_INTERMEDIATE_TILES:\n saveTiles(z, x, y, ntiles, 'base_h', base_h)\n saveTiles(z, x, y, ntiles, 'base_l', base_l)\n saveTiles(z, x, y, ntiles, 'contours', contours)\n saveTiles(z, x, y, ntiles, 'hypsorelief', hypsorelief)\n saveTiles(z, x, y, ntiles, 'landcoverrelief', landcoverrelief)\n saveTiles(z, x, y, ntiles, 'areas', areas)\n saveTiles(z, x, y, ntiles, 'ocean', ocean)\n saveTiles(z, x, y, ntiles, 'features', features)", "def _tunnel_shearzone_data(self):\n file_loc = self.data_path / \"03_GeologicalMapping\" / \"01_TunnelIntersections\"\n columns = [\"x\", \"y\", \"z\", \"true_dip_direction\", \"dip\", \"tunnel\", \"shearzone\"]\n\n path = file_loc / \"Tunnel_intersections.txt\"\n df = pd.read_csv(path, sep=None, names=columns, engine=\"python\")\n df[\"shearzone\"] = df[\"shearzone\"].apply(rename_sz)\n df = df.rename(\n columns={\n \"true_dip_direction\": \"azimuth_struc\",\n \"tunnel\": \"borehole\",\n }\n )\n return df", "def test_all_three_w_save(self):\n output_mask = df_to_px_mask(\n os.path.join(data_dir, 'sample.csv'),\n channels=['footprint', 'boundary', 'contact'],\n boundary_type='outer', boundary_width=5, contact_spacing=15,\n geom_col='PolygonWKT_Pix',\n reference_im=os.path.join(data_dir, \"sample_geotiff.tif\"),\n out_file=os.path.join(data_dir, 'test_out.tif'))\n truth_mask = skimage.io.imread(\n os.path.join(data_dir, 'sample_fbc_from_df2px.tif')\n )\n saved_output_mask = skimage.io.imread(os.path.join(data_dir,\n 'test_out.tif'))\n\n assert np.array_equal(output_mask, truth_mask)\n assert np.array_equal(saved_output_mask, truth_mask)\n os.remove(os.path.join(data_dir, 'test_out.tif')) # clean up after", "def save_fits(df, fname):\n df = df.reset_index()\n outtable = Table.from_pandas(df)\n Path(fname).parent.mkdir(parents=True, exist_ok=True)\n outtable.write(fname, format='fits', overwrite=True)", "def update_img_from_df(df, image, keep=0, dimensions=3, colname='label', inside_value=1, outside_value=0):\n for index, row in df.iterrows():\n if dimensions == 2:\n x, y, label = (row['x'], row['y'], row[colname])\n if label == keep:\n image.SetPixel(x, y, inside_value)\n else:\n image.SetPixel(x, y, outside_value)\n elif dimensions == 3:\n x, y, z, label = (row['x'], row['y'], row['z'], row[colname])\n if label == keep:\n image.SetPixel(x, y, z, inside_value)\n else:\n image.SetPixel(x, y, z, outside_value)\n else:\n raise Exception('Unsupported number of dimensions')", "def get_area_tile_matrix(list_points: List, zoom: int, use_polygon: bool = False) -> pd.DataFrame:\n tiles = []\n for point in list_points:\n tiles += [Utility.get_tile(*point, zoom)]\n left_col = min(tiles, key = lambda item: item[0])[0]\n right_col = max(tiles, key = lambda item: item[0])[0]\n top_row = min(tiles, key = lambda item: item[1])[1]\n bottom_row = max(tiles, key = lambda item: item[1])[1] # notice bottom_row would actually have a higher number\n matrix = pd.DataFrame(index = range(bottom_row - top_row + 1), columns = range(right_col - left_col + 1))\n for row in range(len(matrix)):\n for col in range(len(matrix.iloc[0])):\n matrix.iloc[row,col] = (left_col + col, top_row + row)\n\n if use_polygon:\n polygon = Utility.produce_polygon(list_points, zoom, plot_polygon = False)\n for row in range(len(matrix)):\n for col in range(len(matrix.iloc[0])):\n if 
matrix.iloc[row,col] in tiles: # make sure the polygon points are covered\n continue\n if not polygon.contains_point(matrix.iloc[row,col]):\n matrix.iloc[row,col] = None\n\n return matrix", "def get_time_series(self, tile, year, loc, px, py, cols=1, rows=1, step=1, save_dir='txt/'):\n\n # keys = ['sza', 'saa', 'vza', 'vaa', 'qa', 'b01', 'b02', 'b03', 'b04', 'b05', 'b06', 'b07']\n\n timeBefore = time.clock()\n\n f_list = np.array(glob.glob(loc + '%s_%d_*_1km.nc' % (tile, year)))\n\n # if there are no files for this year\n if f_list.shape[0] == 0:\n print 'Data for year %d and tile %s not found' % (year, tile)\n return -1\n\n # get DOYs from file name\n doys0 = np.sort([int(s.split('_')[4]) for s in f_list])\n # print 'doys0', doys0\n\n # arrange DOYs according to step. I.e. we assume that...\n # step = 7\n doys = np.zeros(doys0.shape).astype(int)\n doys_real = np.zeros(doys0.shape).astype(int)\n\n for jj, ii in enumerate(xrange(0, doys0.shape[0], step)):\n # doys = np.append(doys, doys0[ii:ii+7])\n doys[ii:ii+step] = jj + 1#doys0[ii]\n doys_real[ii:ii+step] = doys0[ii]\n\n ind = np.argsort([int(s.split('_')[4]) for s in f_list])\n f_list = f_list[ind]\n\n # print 'loc:', loc\n # print 'tile:', tile\n # print 'year:', year\n # print 'f_list:', f_list[0]\n\n output = {}\n for f in f_list:\n print 'f:', f\n try:\n ds = nc.Dataset(f)\n except:\n print 'something wrong with %s' % f\n continue\n\n # if a dataset is empty\n # i.e. if it has less than 2 bands\n # i.e. vza, vaa, sza, saa, proj, geo, qa, b1, b2\n if len(ds.variables.keys()) < 9:\n print 'Dataset exists but empty:'\n print 'loc:', loc\n print 'tile:', tile\n print 'year:', year\n print 'f_list:', f_list[0]\n print ''\n # return -1\n else:\n break\n if f == f_list[-1]:\n print 'all datasets are empty'\n return -1\n\n for key in ds.variables.keys():\n if len(ds.variables[key].shape) == 2:\n # output[key] = np.zeros((f_list.shape[0], ds.variables[key].shape[0], ds.variables[key].shape[1]))\n output[key] = np.zeros((f_list.shape[0], cols, rows))\n if len(ds.variables[key].shape) == 1:\n output[key] = np.zeros(ds.variables[key].shape[0]).astype(str)\n\n for i, fname in enumerate(f_list):\n #print fname\n ds = nc.Dataset(fname)\n for key in ds.variables.keys():\n if len(ds.variables[key].shape) == 2:\n\n try:\n # print ds.variables[key][px:px+cols, py:py+rows]\n output[key][i, :, :] = ds.variables[key][px:px+cols, py:py+rows]\n except:\n print 'something wrong in output[%s][%d, :, :]' % (key, i)\n print 'output:', output[key][i, :, :].shape\n print 'ds.variables:', ds.variables[key][px:px+cols, py:py+rows].shape\n\n if len(ds.variables[key].shape) == 1:\n output[key][:] = ds.variables[key][:]\n\n # print 'output.keys:', output.keys()\n\n QA_OK = np.array([8, 72, 136, 200, 1288, 2056, 2120, 2184, 2248])\n qa_passer = np.logical_or.reduce([output['qa'] == x for x in QA_OK])\n\n for b in [1,2,3,4,5,7]:\n\n qa_passer[output[\"b0%d\" % b] <= 0] = 0.\n qa_passer[output[\"b0%d\" % b] >= 10000] = 0.\n\n # if, for this pixel, we have just a few observations we don't need them\n if np.sum(qa_passer) < 2:\n qa_passer[:] = 0\n\n output['qa_passer'] = qa_passer\n [bin(b) for b in QA_OK]\n\n #doys = np.array([int(g.GetRasterBand(b+1).GetMetadata()['DoY']) for b in xrange(g.RasterCount)])\n #years = np.array([int(g.GetRasterBand(b + 1).GetMetadata()['Year']) for b in xrange(g.RasterCount)])\n output['doys'] = doys\n output['doys_real'] = np.unique(doys_real)\n output['years'] = np.ones(doys.shape) * year\n\n output[\"sza\"] = output[\"sza\"] / 100.\n 
output[\"saa\"] = output[\"saa\"] / 100.\n output[\"vza\"] = output[\"vza\"] / 100.\n output[\"vaa\"] = output[\"vaa\"] / 100.\n output['b01'] = output['b01'] / 10000.\n output['b02'] = output['b02'] / 10000.\n output['b03'] = output['b03'] / 10000.\n output['b04'] = output['b04'] / 10000.\n output['b05'] = output['b05'] / 10000.\n output['b06'] = output['b06'] / 10000.\n output['b07'] = output['b07'] / 10000.\n\n # print 'qa_passer:', output['qa_passer']\n #print output['b01']\n\n #pkl.dump(output, open(f_out, 'wb'))\n\n timeAfter = time.clock()\n elapsed_time = timeAfter - timeBefore\n print 'read time series time (s): ', elapsed_time\n\n print 'Read MODIS for year %d data is done' % year\n return output", "def createCoordTuples(data):\n data['xy'] = None\n for i, row in data.iterrows():\n data['xy'][i] = [np.round(row['geometry'].x, decimals=5), np.round(row['geometry'].y, decimals=5)]\n return data", "def ImportControlPz3D(piez_path,sheet_name,geol_layer,layer_num,geol_col,grid,nlay,np_col=\"NP\",x_col=\"x\",y_col=\"y\"):\n \n data = pd.read_excel(piez_path,sheet_name=sheet_name)\n\n geol_layer = geol_layer\n layer_num = layer_num\n\n Control_pz = np.zeros([nlay,grid.nrow,grid.ncol]) #ini list\n\n for ilay in range(len(geol_layer)): # go through each different lithology\n lstIDpz=[]\n Pz=[]\n DB = data[data[geol_col]==geol_layer[ilay]]\n DB.reset_index(inplace=True)\n for o in np.arange(DB.shape[0]): # loop to iterate through the data and returns the intersected cellids\n xc = DB[x_col][o]\n yc = DB[y_col][o] \n cellid = grid.intersect(xc,yc)\n\n if not np.isnan(DB[np_col][o]):\n lstIDpz.append(cellid)\n Pz.append(DB[np_col][o])\n\n df = pd.DataFrame()\n df[\"cellid\"]=lstIDpz\n df[\"Pz\"] = Pz\n df = df.groupby([\"cellid\"]).mean().reset_index() # group pz and apply mean on the same cell\n\n for i in df.index:\n j,k = df.loc[i,\"cellid\"]\n Control_pz[layer_num[ilay],j,k] = df.loc[i,\"Pz\"]\n\n return Control_pz", "def dst(df):\n pass", "def save_n3d_coords(file_path, coords_dict, seq_pos_dict): \n \n file_obj = open(file_path, 'w')\n write = file_obj.write\n \n for chromo in seq_pos_dict:\n chromo_coords = coords_dict[chromo]\n chromo_seq_pos = seq_pos_dict[chromo]\n \n num_models = len(chromo_coords)\n num_coords = len(chromo_seq_pos)\n \n if chromo[:3].lower() != 'chr':\n chromo_name = 'chr' + chromo\n else:\n chromo_name = chromo\n \n line = '%s\\t%d\\t%d\\n' % (chromo_name, num_coords, num_models)\n write(line)\n \n for j in range(num_coords):\n data = chromo_coords[:,j].ravel().tolist()\n data = '\\t'.join('%.8f' % d for d in data)\n \n line = '%d\\t%s\\n' % (chromo_seq_pos[j], data)\n write(line)\n\n file_obj.close()", "def save_phot_fits(df, fname):\n keep_col_phot = [\"MJD\", \"FLUXCAL\", \"FLUXCALERR\", \"FLT\"]\n # eliminate repeated rows (-777.0)\n df_phot = df.copy()\n df_phot = df_phot.loc[df_phot[\"FLUXCAL\"].shift() != df_phot[\"FLUXCAL\"]]\n\n if df_phot.MJD.values[-1] == -777.0:\n df_phot = df_phot.drop(df_phot.index[-1])\n if df_phot.MJD.values[0] == -777.0:\n df_phot = df_phot.drop(df_phot.index[0])\n\n mask_seven = df_phot['MJD'] == -777.0\n df_phot.loc[mask_seven, 'SNID'] = 0\n\n df_phot = df_phot.reset_index()\n df_phot_saved = df_phot[keep_col_phot]\n save_fits(df_phot_saved, fname)\n return df_phot", "def write_overview_tile(self, tx, ty, tz,tms_osm):\n\n image_format = self.get_overview_tile_format(tx, ty, tz)\n\n if image_format is None:\n return\n else:\n num_bands = self.get_num_bands(image_format)\n\n dsquery = self.mem_drv.Create('', 2*self.tile_size, 
2*self.tile_size, num_bands)\n self.fill_init_dest(dsquery)\n # tms: z=19: 281626\n # -z=18-140813 176168*2=352336; 176168*2+1=352337\n # -- 352336,352337\n y_from=2*ty\n y_to=2*ty + 1\n ty_tms=ty;\n s_y_type=\"tms\"\n if tms_osm:\n # osm: z=19: 281626\n # -z=18-140813 85975*2+1=171951; 85975*2=171950\n # -- 171951,171950 [in range: last/end not used]\n y_from=2*ty + 1\n y_to=2*ty\n ty_tms=(2**tz-1) - ty\n s_y_type=\"osm\"\n s_tile_id=\"{0}-{1}-{2}.{3}\".format(str(tz), str(tx),str(ty),s_y_type)\n if self.verbose:\n # Build from zoom 19 tiles: (281626, 171951) (281627, 171951) (281626, 171950) (281627, 171950)\n print \"\\tBuild [\",s_tile_id,\"] from [\",self.output_dir,\"] zoom\", tz+1,\" tiles [\",s_y_type,\"]: \", (2*tx, y_from), (2*tx+1, y_from),(2*tx, y_to), (2*tx+1, y_to)\n\n for cx, cy, child_image_format in self.iter_children(tx, ty, tz):\n if (ty_tms==0 and cy==1) or (ty_tms!=0 and (cy % (y_from)) != 0):\n tileposy = 0\n else:\n tileposy = self.tile_size\n if tx:\n tileposx = cx % (2*tx) * self.tile_size\n elif tx==0 and cx==1:\n tileposx = self.tile_size\n else:\n tileposx = 0\n\n path = self.get_full_path(cx, cy, tz+1, format_extension[child_image_format])\n\n dsquerytile = gdal.Open(path, gdal.GA_ReadOnly)\n\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n dsquerytile.ReadRaster(0, 0, self.tile_size, self.tile_size),\n band_list=range(1, dsquerytile.RasterCount+1))\n\n if image_format == \"PNG\" and dsquerytile.RasterCount != num_bands:\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n self.get_alpha_filler(), band_list=[num_bands])\n\n dstile = self.mem_drv.Create('', self.tile_size, self.tile_size, num_bands)\n path = self.get_full_path(tx, ty, tz, format_extension[image_format])\n self.resampler(path, dsquery, dstile, image_format)", "def arrange_tiles(self, layer):\n\n # número de tiles en 'x'\n width = self.width\n arranged_tiles = layer.arranged_tiles\n\n row = -1\n\n # convierte una lista en un diccionario\n for col, tile in enumerate(layer.tiles):\n # calcula la ubicación en dos dimensiones (fila y columna) de cada tile,\n # los tiles originalmente están ordenados en línea\n col %= width\n if col == 0:\n row += 1\n\n # excluye los tiles con id 0,\n # id 0 representa un espacio vacío en el tilemap\n if tile != 0:\n arranged_tiles[(row, col)] = tile\n\n # libera la memoria ocupada por la lista de tiles\n layer.tiles = None", "def tile(self, (z, x, y)):\n output = self.cache.read((z, x, y))\n if output is None:\n # logger.info(_(\"TilesManager.tile calling sources.tile: \") )\n pass\n output = self.reader.tile(z, x, y)\n if output is None:\n return None\n # Blend layers\n if len(self._layers) > 0:\n logger.debug(_(\"Will blend %s layer(s)\") % len(self._layers))\n output = self._blend_layers(output, (z, x, y))\n # Apply filters\n for f in self._filters:\n image = f.process(self._tile_image(output))\n output = self._image_tile(image)\n # Save result to cache\n self.cache.save(output, (z, x, y))\n self.rendered += 1\n return output", "def tile(self, (z, x, y)):\n output = self.cache.read((z, x, y))\n if output is None:\n output = self.reader.tile(z, x, y)\n # Blend layers\n if len(self._layers) > 0:\n logger.debug(_(\"Will blend %s layer(s)\") % len(self._layers))\n output = self._blend_layers(output, (z, x, y))\n # Apply filters\n for f in self._filters:\n image = f.process(self._tile_image(output))\n output = self._image_tile(image)\n # Save result to cache\n self.cache.save(output, (z, x, y))\n self.rendered += 1\n 
return output", "def xmsmesh_to_dataframe(pts, cells):\n r_pts = pd.DataFrame(pts, columns=['x', 'y', 'z'])\n r_cells = pd.DataFrame([(cells[ x +2], cells[ x +3], cells[ x +4]) for x in range(0, len(cells), 5)], columns=['v0', 'v1', 'v2'])\n return r_pts, r_cells", "def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list", "def take_snapshot():\n df = scrape()\n for i in df.index:\n single = df.loc[i]\n # create or get locations\n loc, created = Location.objects.get_or_create(\n name=single['Location'],\n all_stands=single['Stands'],\n coordinates=single['Coords']\n )\n # add a new snapshot\n obj = Snapshot(\n location=loc,\n avail_bikes=single['Bikes'],\n free_stands=single['Free stands'],\n timestamp=datetime.now(tz=timezone('Europe/Warsaw'))\n )\n obj.save()", "def _calc_coords(self, X, Y, Z):\r\n def _write_coords(coord):\r\n XX.append(X[coord])\r\n YY.append(Y[coord])\r\n ZZ.append(Z[coord])\r\n\r\n def _build_layer():\r\n for j in range(self.size[1]):\r\n for i in range(self.size[0]):\r\n # write NW corner\r\n if i == 0:\r\n nwCoord = 2 * i + 4 * self.size[0] * j + const\r\n _write_coords(nwCoord)\r\n # write NE corner\r\n neCoord = 2 * i + 4 * self.size[0] * j + const + 1\r\n _write_coords(neCoord)\r\n if j == self.size[1] - 1:\r\n for i in range(self.size[0]):\r\n # write SW corner\r\n if i == 0:\r\n swCoord = 2 * i + 4 * self.size[0] * j + 2 * self.size[0] + const\r\n _write_coords(swCoord)\r\n # write SE corner\r\n seCoord = 2 * i + 4 * self.size[0] * j + 2 * self.size[0] + const + 1\r\n _write_coords(seCoord)\r\n\r\n # At this point, we have all points needed for unstructured grid in X,Y,Z\r\n # However, they must be re-arranged so we can define Hexahedrons\r\n # TODO: REFINE CELLS\r\n # PSUEDO:\r\n # find cell to be refined\r\n # add new cells (as easy as pie)\r\n\r\n XX, YY, ZZ = ([] for i in range(3))\r\n const = 0\r\n for k in range(self.size[2]):\r\n _build_layer()\r\n if k == self.size[2] - 1:\r\n const += self.size[0] * self.size[1] * 4\r\n _build_layer()\r\n break\r\n else:\r\n const += self.size[0] * self.size[1] * 8\r\n return XX, YY, ZZ", "def site_frame_to_global(df):\n\n # Swap x and y axes, and invert z\n df = swap_columns(df,'x','y')\n df = swap_columns(df,'xe','ye')\n df.loc[:,\"z\"] *= -1\n return df", "def copy_to_scratch(src_info, dest_info, coords_xyz):\n logger.info(\"Sampling coordinates\")\n src_svs = []\n for coord_xyz in tqdm(coords_xyz):\n coord_zyx = coord_xyz[::-1]\n src_sv = fetch_label_for_coordinate(src_info, coord_zyx, supervoxels=True)\n src_svs.append(src_sv)\n\n df = pd.DataFrame(coords_xyz, columns=list('xyz'))\n df['sv'] = src_svs\n\n bad_rows = df.query('sv == 0')\n if len(bad_rows) > 0:\n logger.error(\"ERROR: Some coordinates mapped to empty segmentation:\")\n logger.error(str(bad_rows[['x', 'y', 'z']].values.tolist()))\n \n df.query('sv != 0', inplace=True)\n df.drop_duplicates(['sv'], inplace=True)\n\n logger.info(\"Copying RLEs\")\n copy_infos = copy_splits(df['sv'].values, src_info, dest_info)\n\n return 
df[['x', 'y', 'z']].values, copy_infos", "def get_tile(tilefile,level,x,y):\n\t\n\ttf=file(tilefile,\"r\")\n\t\n\ttd=pickle.load(tf)\n\ta=td[(level,x,y)]\n\t\n\ttf.seek(a[0],1)\n\tret=tf.read(a[1])\n\t\n\ttf.close()\n\treturn ret", "def write_array(uri: str):\n a1_data = np.reshape(np.arange(1, 26), (5, 5))\n l1_data = np.arange(5, 0, -1)\n l2_data = np.arange(-2, 3)\n l3_data = np.linspace(-1.0, 1.0, 5)\n with tiledb.open(uri, \"w\") as array:\n array[:] = {\"a1\": a1_data, \"l1\": l1_data, \"l2\": l2_data, \"l3\": l3_data}", "def filtering(self, Z):\n coord_x_est = [0]\n coord_y_est = [0]\n time_est = [Z[['time']].iloc[0][0]]\n for i in range(1, len(Z.x)):\n new_dt = Z[['time']].iloc[i][0] - time_est[-1]\n while (new_dt > 150):\n new_state = [coord_x_est[-1], coord_y_est[-1], time_est[-1]]\n new_state = self._restore(new_state)\n coord_x_est.append(new_state[0])\n coord_y_est.append(new_state[1])\n time_est.append(new_state[2])\n new_dt -= 100\n\n# self.configure(new_dt)\n self._predict()\n mes = Z[['x', 'y']].iloc[i].to_numpy()\n self._update(np.resize(mes, (2, 1)))\n # save for latter plotting\n coord_x_est.append(self.x[0])\n coord_y_est.append(self.x[4])\n time_est.append(int(Z[['time']].iloc[i]))\n\n return pd.DataFrame({'X_f': coord_x_est,\n 'Y_f': coord_y_est,\n 'time': time_est\n })", "def write_grid(self):\n \n self.fout = self.create_savename()\n ncout = Dataset(self.fout, 'w')\n print('Writing: %s' % self.fout)\n \n # Create dimensions\n lon = ncout.createDimension(self.xvar, self.nx)\n lat = ncout.createDimension(self.yvar, self.ny)\n depth = ncout.createDimension(self.zvar, self.nz)\n tdim = ncout.createDimension('time', None)\n bndsDim = ncout.createDimension('bnds', 2)\n\n # Create variables\n varx = ncout.createVariable(self.xvar, 'float64', (self.xvar,))\n vary = ncout.createVariable(self.yvar, 'float64', (self.yvar,))\n varz = ncout.createVariable(self.zvar, 'float64', (self.zvar,))\n\n varx.standard_name = 'longitude'\n varx.units = 'degrees'\n ncout.variables['LONGITUDE'].bounds = 'lon_bnds'\n lonBndsVar = ncout.createVariable('lon_bnds', 'float64', (self.xvar, 'bnds'))\n xboundaries = np.concatenate([self.xminbounds, np.reshape(self.xmaxbounds[-1],(1,1))[0]])\n lonBndsVar[:,:] = np.array([xboundaries[:-1], xboundaries[1:]]).T\n\n vary.standard_name = 'latitude'\n vary.units = 'degrees'\n ncout.variables['LATITUDE'].bounds = 'lat_bnds'\n latBndsVar = ncout.createVariable('lat_bnds', 'float64', (self.yvar, 'bnds'))\n yboundaries = np.concatenate([self.yminbounds, np.reshape(self.ymaxbounds[-1],(1,1))[0]])\n latBndsVar[:,:] = np.array([yboundaries[:-1], yboundaries[1:]]).T\n \n varz.standard_name = 'depth'\n varz.units = 'metres'\n ncout.variables['DEPH_CORRECTED'].bounds = 'depth_bnds'\n depthBndsVar = ncout.createVariable('depth_bnds', 'float64', (self.zvar, 'bnds'))\n zboundaries = np.concatenate([self.zminbounds, np.reshape(self.zmaxbounds[-1],(1,1))[0]])\n depthBndsVar[:,:] = np.array([zboundaries[:-1], zboundaries[1:]]).T\n\n vartmean = ncout.createVariable('tmean', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varmtmean = ncout.createVariable(self.datavar, 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varsum = ncout.createVariable('sum', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varmsum = ncout.createVariable('meansum', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varcount = ncout.createVariable('count', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n# varmax = ncout.createVariable('gmax', 'float32', ('time', self.zvar, 
self.yvar, self.xvar))\n# varmin = ncout.createVariable('gmin', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n# varmed = ncout.createVariable('median', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n varpcount = ncout.createVariable('pcount', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n vartime = ncout.createVariable('time', 'float64', ('time',))\n vartime.units = 'hours since 0001-01-01 00:00:00'\n vartime.calendar = 'gregorian'\n\n # Write to variables\n varx[:] = self.xgrid\n vary[:] = self.ygrid\n varz[:] = self.zgrid\n vartmean[:] = self.grid_tmean[np.newaxis]\n varmtmean[:] = self.grid_meantmean[np.newaxis]\n varsum[:] = self.grid_sum[np.newaxis]\n varmsum[:] = self.grid_meansum[np.newaxis]\n varcount[:] = self.grid_count[np.newaxis]\n varpcount[:] = self.grid_pcount[np.newaxis]\n# varmax[:] = self.grid_max[np.newaxis]\n# varmin[:] = self.grid_min[np.newaxis]\n# varmed[:] = self.grid_med[np.newaxis]\n vartime[:] = date2num(self.dt, units=vartime.units, calendar=vartime.calendar)\n \n # Add global attributes\n ncout.history = 'Created ' + time.ctime(time.time())\n \n # Save\n ncout.close()", "def find_loc_indices(loc, dir, tile):\n #returns the indices of the nearest neighbor point in the given tile, the lon/lat of the nearest neighbor, \n #and the distance (m) from the given point to the nearest neighbor grid cell\n \n filename_pattern = '*grid.tile{0}.nc'.format(tile)\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n filename = f_name\n if not filename:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n nc_file = Dataset('{0}/{1}'.format(dir,filename))\n #read in supergrid longitude and latitude\n lon_super = np.array(nc_file['x']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n lat_super = np.array(nc_file['y']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n #get the longitude and latitude data for the grid centers by slicing the supergrid \n #and taking only odd-indexed values\n longitude = lon_super[1::2,1::2]\n latitude = lat_super[1::2,1::2]\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n temp_loc = copy.deepcopy(loc)\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n if loc[0] < 180:\n temp_loc[0] += 360\n \n #set up an array to hold the euclidean distance between the given point and every grid cell\n eucl_dist = np.zeros((longitude.shape[0],longitude.shape[1]))\n \n #get the Cartesian location of the given point\n cart_loc = np.array(sph2cart(math.radians(temp_loc[0]), math.radians(temp_loc[1]), earth_radius))\n \n for i in range(len(longitude)):\n for j in range(len(longitude[i])):\n #get the Cartesian location of all grid points\n cart_cell = np.array(sph2cart(math.radians(longitude[i,j]), math.radians(latitude[i,j]), earth_radius))\n \n #calculate the euclidean distance from the given point to the current grid cell\n eucl_dist[i,j] = np.linalg.norm(cart_loc - cart_cell)\n \n #get the indices of the grid point with the minimum euclidean distance to the given point\n i,j = np.unravel_index(eucl_dist.argmin(), eucl_dist.shape)\n \n return (i,j,longitude[i,j]%360.0, latitude[i,j], eucl_dist[i,j])", "def copy_to_scratch_from_coordinates(coords_csv, production_info, scratch_info, output_report_path):\n handler = 
logging.StreamHandler(sys.stdout)\n logger.setLevel(logging.INFO)\n logging.getLogger().addHandler(handler)\n\n coords_xyz = pd.read_csv(coords_csv, header=None, names=['x', 'y', 'z'], dtype=np.int32).values\n \n coords_xyz, copy_infos = copy_to_scratch(production_info, scratch_info, coords_xyz)\n scratch_split_svs = np.array([info.split_sv for info in copy_infos])\n cleave_info = cleave_supervoxels_as_isolated_bodies( scratch_info, scratch_split_svs )\n\n logger.info(\"Preparing output CSV\")\n table = []\n for coord_xyz, copy_info, cleave_info in zip(coords_xyz, copy_infos, cleave_info):\n x, y, z = coord_xyz\n production_sv = copy_info.src_sv\n scratch_sv = cleave_info[0]\n scratch_body = cleave_info[1]\n scratch_cleaved_body = cleave_info[2]\n table.append( (x,y,z,production_sv,scratch_sv,scratch_body,scratch_cleaved_body) )\n \n df = pd.DataFrame(table, columns=['x','y','z','production_sv','scratch_sv','scratch_body','scratch_cleaved_body'])\n df.to_csv(output_report_path, index=False)\n logger.info(\"DONE!\")", "def get_tile_info(file_info, img_info):\n all_tiles = []\n new_tiles = {}\n if img_info['invert_x']:\n xmin = img_info['width']*img_info['scale'] - img_info['viewer']['right']\n xmax = img_info['width']*img_info['scale'] - img_info['viewer']['left']\n else:\n xmin = img_info['viewer']['left']\n xmax = img_info['viewer']['right']\n if img_info['invert_y']:\n ymin = img_info['height']*img_info['scale'] - img_info['viewer']['bottom']\n ymax = img_info['height']*img_info['scale'] - img_info['viewer']['top']\n else:\n ymin = img_info['viewer']['top']\n ymax = img_info['viewer']['bottom']\n minCol = int(max(1,math.floor(xmin/file_info['tile_width'])))-1\n maxCol=int(min(img_info['columns'],math.ceil(xmax/file_info['tile_width'])))\n minRow = int(max(1,math.floor(ymin/file_info['tile_height'])))-1\n maxRow = int(min(img_info['rows'],math.ceil(ymax/file_info['tile_height'])))\n \n block_width = int(math.ceil(file_info['tile_width']/img_info['scale']))\n block_height = int(math.ceil(file_info['tile_height']/img_info['scale']))\n \n for row in range(minRow,maxRow):\n y0 = row*file_info['tile_height']\n yf = (row+1)*file_info['tile_height']\n y0_idx = int(y0/img_info['scale'])\n yf_idx = min(y0_idx + block_height, img_info['height'])\n for col in range(minCol,maxCol):\n all_tiles.append(str(col)+','+str(row))\n tile_idx = str(col)+','+str(row)\n if (tile_idx not in img_info['tiles'] or \n 'loaded' not in img_info['tiles'][tile_idx] or\n not img_info['tiles'][tile_idx]['loaded']):\n x0 = col*file_info['tile_width']\n xf = (col+1)*file_info['tile_width']\n x0_idx = int(x0/img_info['scale'])\n xf_idx = min(x0_idx+block_width, img_info['width'])\n tile_width = int((xf_idx-x0_idx)*img_info['scale'])\n tile_height = int((yf_idx-y0_idx)*img_info['scale'])\n new_filepath = get_tile_filename(\n file_info, img_info, x0_idx, xf_idx, y0_idx, yf_idx)\n tile = {\n 'idx': tile_idx,\n 'left': x0,\n 'right': xf,\n 'top': y0,\n 'bottom': yf,\n 'y0_idx': y0_idx,\n 'yf_idx': yf_idx,\n 'x0_idx': x0_idx,\n 'xf_idx': xf_idx,\n 'new_filepath': new_filepath,\n 'loaded': False,\n 'row': row,\n 'col': col,\n 'x': col*file_info['tile_width'],\n 'y': row*file_info['tile_height'],\n 'width': tile_width,\n 'height': tile_height\n }\n if img_info['invert_y']:\n tile['top'] = yf\n tile['bottom'] = y0\n if img_info['invert_x']:\n tile['left'] = xf\n tile['right'] = x0\n new_tiles[tile_idx] = tile\n print('viewer:', img_info['viewer'])\n print('new tiles', new_tiles.keys())\n return all_tiles, new_tiles", "def slice(\n 
filename,\n number_tiles=None,\n col=None,\n row=None,\n save=True,\n DecompressionBombWarning=True,\n):\n if DecompressionBombWarning is False:\n Image.MAX_IMAGE_PIXELS = None\n\n im = Image.open(filename)\n im_w, im_h = im.size\n\n columns = 0\n rows = 0\n if number_tiles:\n validate_image(im, number_tiles)\n columns, rows = calc_columns_rows(number_tiles)\n else:\n validate_image_col_row(im, col, row)\n columns = col\n rows = row\n\n tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))\n\n tiles = []\n number = 1\n for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.\n for pos_x in range(0, im_w - columns, tile_w): # as above.\n area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)\n image = im.crop(area)\n position = (int(floor(pos_x / tile_w)) + 1, int(floor(pos_y / tile_h)) + 1)\n coords = (pos_x, pos_y)\n tile = Tile(image, number, position, coords)\n tiles.append(tile)\n number += 1\n if save:\n save_tiles(\n tiles, prefix=get_basename(filename), directory=os.path.dirname(filename)\n )\n return tuple(tiles)", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def zoning_example(data):\n\n # account all outer walls not adjacent to the ambient to the entity\n # \"inner wall\"\n # !right now the wall construction of the added wall is not respected,\n # the same wall construction as regular\n # inner wall is set\n for index, line in data.iterrows():\n if not pd.isna(line[\"WallAdjacentTo\"]):\n data.at[index, \"InnerWallArea[m²]\"] = (\n data.at[index, \"OuterWallArea[m²]\"]\n + data.at[index, \"WindowArea[m²]\"]\n + data.at[index, \"InnerWallArea[m²]\"]\n )\n data.at[index, \"WindowOrientation[°]\"] = np.NaN\n data.at[index, \"WindowArea[m²]\"] = np.NaN\n data.at[index, \"WindowConstruction\"] = np.NaN\n data.at[index, \"OuterWallOrientation[°]\"] = np.NaN\n data.at[index, \"OuterWallArea[m²]\"] = np.NaN\n data.at[index, \"OuterWallConstruction\"] = np.NaN\n\n # make all rooms that belong to a certain room have the same room identifier\n _list = []\n for index, line in data.iterrows():\n if pd.isna(line[\"BelongsToIdentifier\"]):\n _list.append(line[\"RoomIdentifier\"])\n else:\n _list.append(line[\"BelongsToIdentifier\"])\n data[\"RoomCluster\"] = _list\n\n # check for lines in which the net area is zero, marking an second wall\n # or window\n # element for the respective room, and in which there is still stated a\n # UsageType which is wrong\n # and should be changed in the file\n for i, row in data.iterrows():\n if row[\"NetArea[m²]\"] == 0 and not pd.isna(row[\"UsageType\"]):\n warnings.warn(\n \"In line %s the net area is zero, marking an second wall or \"\n \"window element for the respective room, \"\n \"and in which there is still stated a UsageType which is \"\n \"wrong and should be changed in the file\" % i\n )\n\n # make all rooms of the cluster having the usage type of the main usage type\n _groups = data.groupby([\"RoomCluster\"])\n for 
index, cluster in _groups:\n count = 0\n for line in cluster.iterrows():\n if pd.isna(line[1][\"BelongsToIdentifier\"]) and not pd.isna(\n line[1][\"UsageType\"]\n ):\n main_usage = line[1][\"UsageType\"]\n for i, row in data.iterrows():\n if row[\"RoomCluster\"] == line[1][\"RoomCluster\"]:\n data.at[i, \"RoomClusterUsage\"] = main_usage\n count += 1\n if count != 1:\n warnings.warn(\n \"This cluster has more than one main usage type or none, \"\n \"check your excel file for mistakes! \\n\"\n \"Common mistakes: \\n\"\n \"-NetArea of a wall is not equal to 0 \\n\"\n \"-UsageType of a wall is not empty \\n\"\n \"Explanation: Rooms may have outer walls/windows on different orientations.\\n\"\n \"Every row with an empty slot in the column UsageType, \"\n \"marks another direction of an outer wall and/or\"\n \"window entity of the same room.\\n\"\n \"The connection of the same room is realised by an \"\n \"RoomIdentifier equal to the respective \"\n \"BelongsToIdentifier. \\n Cluster = %s\" % cluster\n )\n\n # name usage types after usage types available in the json\n usage_to_json_usage = {\n \"IsolationRoom\": \"Bed room\",\n \"PatientRoom\": \"Bed room\",\n \"Aisle\": \"Corridors in the general care area\",\n \"Technical room\": \"Stock, technical equipment, archives\",\n \"Washing\": \"WC and sanitary rooms in non-residential buildings\",\n \"Stairway\": \"Corridors in the general care area\",\n \"WC\": \"WC and sanitary rooms in non-residential buildings\",\n \"Storage\": \"Stock, technical equipment, archives\",\n \"Lounge\": \"Meeting, Conference, seminar\",\n \"Office\": \"Meeting, Conference, seminar\",\n \"Treatment room\": \"Examination- or treatment room\",\n \"StorageChemical\": \"Stock, technical equipment, archives\",\n \"EquipmentServiceAndRinse\": \"WC and sanitary rooms in non-residential buildings\",\n }\n\n # rename all zone names from the excel to the according zone name which\n # is in the UseConditions.json files\n usages = get_list_of_present_entries(data[\"RoomClusterUsage\"])\n for usage in usages:\n data[\"RoomClusterUsage\"] = np.where(\n data[\"RoomClusterUsage\"] == usage,\n usage_to_json_usage[usage],\n data[\"RoomClusterUsage\"],\n )\n\n # name the column where the zones are defined \"Zone\"\n data[\"Zone\"] = data[\"RoomClusterUsage\"]\n\n return data", "def get_tile_index_range(dataset_filename):\n dataset = gdal.Open(dataset_filename)\n assert dataset, 'Unable to open dataset %s' % dataset_filename\n spatial_reference = osr.SpatialReference()\n spatial_reference.ImportFromWkt(dataset.GetProjection())\n geotransform = dataset.GetGeoTransform()\n logger.debug('geotransform = %s', geotransform)\n# latlong_spatial_reference = spatial_reference.CloneGeogCS()\n tile_spatial_reference = osr.SpatialReference()\n s = re.match('EPSG:(\\d+)', tile_type_info['crs'])\n if s:\n epsg_code = int(s.group(1))\n logger.debug('epsg_code = %d', epsg_code)\n assert tile_spatial_reference.ImportFromEPSG(epsg_code) == 0, 'Invalid EPSG code for tile projection'\n else:\n assert tile_spatial_reference.ImportFromWkt(tile_type_info['crs']), 'Invalid WKT for tile projection'\n \n logger.debug('Tile WKT = %s', tile_spatial_reference.ExportToWkt())\n \n coord_transform_to_tile = osr.CoordinateTransformation(spatial_reference, tile_spatial_reference)\n # Upper Left\n xmin, ymax, _z = coord_transform_to_tile.TransformPoint(geotransform[0], geotransform[3], 0)\n # Lower Right\n xmax, ymin, _z = coord_transform_to_tile.TransformPoint(geotransform[0] + geotransform[1] * dataset.RasterXSize, \n 
geotransform[3] + geotransform[5] * dataset.RasterYSize, \n 0)\n \n logger.debug('Coordinates: xmin = %f, ymin = %f, xmax = %f, ymax = %f', xmin, ymin, xmax, ymax)\n\n return (int(floor((xmin - tile_type_info['x_origin']) / tile_type_info['x_size'])), \n int(floor((ymin - tile_type_info['y_origin']) / tile_type_info['y_size'])), \n int(ceil((xmax - tile_type_info['x_origin']) / tile_type_info['x_size'])), \n int(ceil((ymax - tile_type_info['y_origin']) / tile_type_info['y_size'])))", "def __init__(self, df, num_classes, image_size, device):\n self.maps = df['map_path'].tolist() \n self.contours = df['contourLevel'].tolist()\n self.points = df['tagged_points_path'].tolist()\n self.masks = df['tagged_path'].tolist()\n self.num_classes = num_classes\n self.image_size = image_size\n self.device = device", "def tile_at(self, zoom, position):\n x, y = self.project_pixels(position, zoom)\n return (zoom, int(x/self.tileSize), int(y/self.tileSize))", "def entries(self):\n out = []\n for y,x in self.coords(False):\n out.append((y,x,self.retrieve(y,x)))\n return out", "def write_dataframe(path, dataset_dict, image_list, datasettype, mask_part):\n for mask_el in mask_part:\n titles = []\n for i in range(len(image_list)):\n # Get rid of .tif and the path before\n image_name = os.path.split(image_list[i])[1]\n titles.append(image_name[:-4])\n df = pd.DataFrame({'Sample name': pd.Series(titles),\n 'GT count': pd.Series(dataset_dict['count_masks_%s' % mask_el]),\n 'Network count': pd.Series(dataset_dict['count_preds_%s' % mask_el]),\n 'GT area': pd.Series(dataset_dict['area_masks_%s' % mask_el]),\n 'Network area': pd.Series(dataset_dict['area_preds_%s' % mask_el]),\n 'Network dice pixel': pd.Series(dataset_dict['dice_coeffs_%s' % mask_el]),\n 'Network dice object': pd.Series(dataset_dict['object_dc_%s' % mask_el]),\n 'Network True pos': pd.Series(dataset_dict['tp_%s' % mask_el]),\n 'Network False pos': pd.Series(dataset_dict['fp_%s' % mask_el]),\n 'Network False neg': pd.Series(dataset_dict['fn_%s' % mask_el])})\n df.to_excel(str(os.path.join(path, datasettype + '_Dataframe_' + mask_el + '.xlsx')))\n # df.to_csv(path + datasettype + '_Dataframe_' + mask_el + '.csv')\n return", "def bounds():\n\n with open('bounds.csv', 'w', newline='') as file:\n writer = csv.writer(file, delimiter=\",\")\n writer.writerow([\"File_number\", \"Bounds:Left\", \"Bounds:Bottom\" , \"Bounds:Right\" , \"Bounds:Top\"])\n \n for i in range(1,44):\n if i < 10:\n df = rasterio.open(f\"DSM/DSM_k0{i}.tif\")\n \n with open (\"bounds.csv\", 'a', newline='') as file:\n writer = csv.writer(file, delimiter=\",\")\n writer.writerow([i , df.bounds.left , df.bounds.bottom , df.bounds.right , df.bounds.top])\n \n else:\n df = rasterio.open(f\"DSM/DSM_k{i}.tif\")\n \n with open (\"bounds.csv\", 'a', newline='') as file:\n writer = csv.writer(file, delimiter=\",\")\n writer.writerow([i , df.bounds.left , df.bounds.bottom , df.bounds.right , df.bounds.top])", "def reformat_xyz(tile_gdf):\n tile_gdf['xyz'] = tile_gdf.id.apply(lambda x: x.lstrip('(,)').rstrip('(,)').split(','))\n tile_gdf['xyz'] = [[int(q) for q in p] for p in tile_gdf['xyz']]\n return tile_gdf", "def convert_tile(fname, out_fname, compression, filter_opts):\n with h5py.File(out_fname, 'w') as fid:\n with rasterio.open(fname) as ds:\n # global attributes\n attach_attributes(fid, ds.tags())\n\n # find and convert every subsdataset (sds)\n for sds_name in ds.subdatasets:\n with rasterio.open(sds_name) as sds:\n ds_name = Path(sds_name.replace(':', '/')).name\n\n # create empty or 
copy the user supplied filter options\n if not filter_opts:\n f_opts = dict()\n else:\n f_opts = filter_opts.copy()\n\n # use sds native chunks if none are provided\n if 'chunks' not in f_opts:\n f_opts['chunks'] = list(sds.block_shapes[0])\n\n # modify to have 3D chunks if we have a multiband sds\n if sds.count == 3:\n # something could go wrong if a user supplies\n # a 3D chunk eg (2, 256, 340)\n f_opts['chunks'].insert(0, 1)\n f_opts['chunks'] = tuple(f_opts['chunks'])\n else:\n f_opts['chunks'] = tuple(f_opts['chunks'])\n\n # subdataset attributes and spatial attributes\n attrs = sds.tags()\n attrs['geotransform'] = sds.transform.to_gdal()\n attrs['crs_wkt'] = sds.crs.wkt\n\n # ensure single band sds is read a 2D not 3D\n data = sds.read() if sds.count == 3 else sds.read(1)\n\n # write to disk as an IMAGE Class Dataset\n write_h5_image(data, ds_name, fid, attrs=attrs,\n compression=compression,\n filter_opts=f_opts)", "def _testTilesZXY(server, admin, itemId, metadata, tileParams=None,\n imgHeader=utilities.JPEGHeader, token=None):\n if tileParams is None:\n tileParams = {}\n if token:\n kwargs = {'token': token}\n else:\n kwargs = {'user': admin}\n # We should get images for all valid levels, but only within the\n # expected range of tiles.\n for z in range(metadata.get('minLevel', 0), metadata['levels']):\n maxX = math.ceil(float(metadata['sizeX']) * 2 ** (\n z - metadata['levels'] + 1) / metadata['tileWidth']) - 1\n maxY = math.ceil(float(metadata['sizeY']) * 2 ** (\n z - metadata['levels'] + 1) / metadata['tileHeight']) - 1\n # Check the four corners on each level\n for (x, y) in ((0, 0), (maxX, 0), (0, maxY), (maxX, maxY)):\n resp = server.request(path='/item/%s/tiles/zxy/%d/%d/%d' % (\n itemId, z, x, y), params=tileParams, isJson=False,\n **kwargs)\n if (resp.output_status[:3] != b'200' and\n metadata.get('sparse') and z > metadata['sparse']):\n assert utilities.respStatus(resp) == 404\n continue\n assert utilities.respStatus(resp) == 200\n image = utilities.getBody(resp, text=False)\n assert image[:len(imgHeader)] == imgHeader\n # Check out of range each level\n for (x, y) in ((-1, 0), (maxX + 1, 0), (0, -1), (0, maxY + 1)):\n resp = server.request(path='/item/%s/tiles/zxy/%d/%d/%d' % (\n itemId, z, x, y), params=tileParams, **kwargs)\n if x < 0 or y < 0:\n assert utilities.respStatus(resp) == 400\n assert 'must be positive integers' in resp.json['message']\n else:\n assert utilities.respStatus(resp) == 404\n assert ('does not exist' in resp.json['message'] or\n 'outside layer' in resp.json['message'])\n # Check negative z level\n resp = server.request(path='/item/%s/tiles/zxy/-1/0/0' % itemId,\n params=tileParams, **kwargs)\n assert utilities.respStatus(resp) == 400\n assert 'must be positive integers' in resp.json['message']\n # Check non-integer z level\n resp = server.request(path='/item/%s/tiles/zxy/abc/0/0' % itemId,\n params=tileParams, **kwargs)\n assert utilities.respStatus(resp) == 400\n assert 'must be integers' in resp.json['message']\n # If we set the minLevel, test one lower than it\n if 'minLevel' in metadata:\n resp = server.request(path='/item/%s/tiles/zxy/%d/0/0' % (\n itemId, metadata['minLevel'] - 1), params=tileParams, **kwargs)\n assert utilities.respStatus(resp) == 404\n assert 'layer does not exist' in resp.json['message']\n # Check too large z level\n resp = server.request(path='/item/%s/tiles/zxy/%d/0/0' % (\n itemId, metadata['levels']), params=tileParams, **kwargs)\n assert utilities.respStatus(resp) == 404\n assert 'layer does not exist' in 
resp.json['message']", "def write_geojson(vec:gpd.GeoDataFrame, dest):\n\t\tdest = str(dest)\n\n\t\t# WGS 84\n\t\t#vec = vec.to_crs({'init': 'epsg:4326'})\n\n\t\tif os.path.isfile(dest):\n\t\t\tos.remove(dest)\n\t\t\t\n\t\tvec.to_file(dest, driver='GeoJSON', encoding='utf-8')", "def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):\n y_factor = y_factor or z_factor\n x_factor = x_factor or y_factor\n zo, zi = s[tensor].split(z, z_factor)\n yo, yi = s[tensor].split(y, y_factor)\n xo, xi = s[tensor].split(x, x_factor)\n s[tensor].bind(zo, te.thread_axis(\"blockIdx.z\"))\n s[tensor].bind(zi, te.thread_axis(\"threadIdx.z\"))\n s[tensor].bind(yo, te.thread_axis(\"blockIdx.y\"))\n s[tensor].bind(yi, te.thread_axis(\"threadIdx.y\"))\n s[tensor].bind(xo, te.thread_axis(\"blockIdx.x\"))\n s[tensor].bind(xi, te.thread_axis(\"threadIdx.x\"))\n s[tensor].reorder(zo, yo, xo, zi, yi, xi)\n return zo, yo, xo, zi, yi, xi", "def save_grid(\n rho, psi, resol,\n save_options,\n npy, npz, hdf5,\n loc, ix, its_per_save,\n ):\n\n save_num = int((ix + 1) / its_per_save)\n\n if (save_options[0]):\n if (npy):\n file_name = \"rho_#{0}.npy\".format(save_num)\n np.save(\n os.path.join(os.path.expanduser(loc), file_name),\n rho\n )\n if (npz):\n file_name = \"rho_#{0}.npz\".format(save_num)\n np.savez(\n os.path.join(os.path.expanduser(loc), file_name),\n rho\n )\n if (hdf5):\n file_name = \"rho_#{0}.hdf5\".format(save_num)\n file_name = os.path.join(os.path.expanduser(loc), file_name)\n f = h5py.File(file_name, 'w')\n dset = f.create_dataset(\"init\", data=rho)\n f.close()\n if (save_options[2]):\n plane = rho[:, :, int(resol / 2)]\n if (npy):\n file_name = \"plane_#{0}.npy\".format(save_num)\n np.save(\n os.path.join(os.path.expanduser(loc), file_name),\n plane\n )\n if (npz):\n file_name = \"plane_#{0}.npz\".format(save_num)\n np.savez(\n os.path.join(os.path.expanduser(loc), file_name),\n plane\n )\n if (hdf5):\n file_name = \"plane_#{0}.hdf5\".format(save_num)\n file_name = os.path.join(os.path.expanduser(loc), file_name)\n f = h5py.File(file_name, 'w')\n dset = f.create_dataset(\"init\", data=plane)\n f.close()\n if (save_options[1]):\n if (npy):\n file_name = \"psi_#{0}.npy\".format(save_num)\n np.save(\n os.path.join(os.path.expanduser(loc), file_name),\n psi\n )\n if (npz):\n file_name = \"psi_#{0}.npz\".format(save_num)\n np.savez(\n os.path.join(os.path.expanduser(loc), file_name),\n psi\n )\n if (hdf5):\n file_name = \"psi_#{0}.hdf5\".format(save_num)\n file_name = os.path.join(os.path.expanduser(loc), file_name)\n f = h5py.File(file_name, 'w')\n dset = f.create_dataset(\"init\", data=psi)\n f.close()\n if (save_options[4]):\n line = rho[:, int(resol / 2), int(resol / 2)]\n file_name2 = \"line_#{0}.npy\".format(save_num)\n np.save(\n os.path.join(os.path.expanduser(loc), file_name2),\n line\n )", "def clean_and_save_worldwide(df):\n drop_columns = ['FIPS',\n 'Lat', \n 'Long_', \n 'Combined_Key', \n 'Admin2', \n 'Province_State']\n\n df.drop(columns=drop_columns, inplace=True)\n\n df_cases = df.groupby(['Country_Region'], as_index=False).sum()\n df_cases.to_csv('../data/Total_cases_worldwide.csv', index=False)", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = 
(width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )", "def get_specific_tile(idx, tiles_gdf):\n tile_poly = tiles_gdf.iloc[idx]['geometry']\n # print(tile_poly.bounds)\n return tile_poly", "def prepare_map(self):\n for y_coord, row in enumerate(self.contents):\n for x_coord, tile in enumerate(row):\n bit_map = self.get_tile_bitmap(tile)\n self.image[y_coord * TILE_SIZE:(y_coord+1) * TILE_SIZE,\n x_coord * TILE_SIZE:(x_coord+1) * TILE_SIZE] = bit_map", "def replicate(self, nx, ny, nz):\n contents_list = []\n numreplicate = 0\n for i in range(nx):\n for j in range(ny):\n for k in range(nz):\n new_df = self.contents.copy()\n new_df['X'] += i * self.lengthx\n new_df['Y'] += j * self.lengthy\n new_df['Z'] += k * self.lengthz\n contents_list.append(new_df)\n numreplicate += 1\n self.numatom *= numreplicate\n self.contents = pd.concat(contents_list)", "def save_potholes(df, path=\"potholes.csv\"):\n\t\n\tdf.to_csv(path)", "def clean_and_save_timeseries(df):\n drop_columns = ['Lat', \n 'Long', \n 'Province/State']\n\n df.drop(columns=drop_columns, inplace = True)\n \n df_grouped = df.groupby(['Country/Region'], as_index=False).sum()\n df_grouped = df_grouped.set_index('Country/Region').transpose()\n df_grouped.reset_index(level=0, inplace=True)\n df_grouped.rename(columns={'index': 'Date'}, inplace=True)\n df_grouped['Date'] = pd.to_datetime(df_grouped['Date'])\n\n df_grouped.to_csv('../data/worldwide_timeseries.csv', index=False)", "def add_pos_features(df: pd.DataFrame, drop_scores=False) -> pd.DataFrame:\n # Distance between left and right points in pairs of limbs\n # relative to image size (Euclidean, horizontal and vertical)\n for point_type in ('elbow', 'wrist', 'knee', 'ankle'):\n d = np.apply_along_axis(\n distance, 1, df[[\n f'left_{point_type}_x', f'left_{point_type}_y',\n f'right_{point_type}_x', f'right_{point_type}_y'\n ]].values)\n df[f'{point_type}s_dist'], df[f'{point_type}s_hor_dist'], \\\n df[f'{point_type}s_vert_dist'] = d.transpose()\n\n # Distance between specific keypoint pairs\n for point_1, point_2 in [('wrist', 'ankle'), ('wrist', 'knee'),\n ('wrist', 'hip'), ('wrist', 'elbow'),\n ('wrist', 'shoulder'), ('wrist', 'ear'),\n ('ankle', 'hip'), ('ankle', 'ear'),\n ('elbow', 'knee'), ('knee', 'hip')]:\n for side_1 in ('left', 'right'):\n for side_2 in ('left', 'right'):\n d = np.apply_along_axis(\n distance, 1, df[[\n f'{side_1}_{point_1}_x', f'{side_1}_{point_1}_y',\n f'{side_2}_{point_2}_x', f'{side_2}_{point_2}_y'\n ]].values)\n df[f'{side_1}_{point_1}_{side_2}_{point_2}_dist'], \\\n df[f'{side_1}_{point_1}_{side_2}_{point_2}_hor_dist'], \\\n df[f'{side_1}_{point_1}_{side_2}_{point_2}_vert_dist'] = d.transpose()\n\n # Relative upper / lower positions of specific keypoints (binary values: 0/1)\n for point_1, point_2 in combinations(['ear', 'hip', 'knee', 'ankle', 'wrist', 'elbow'], 2):\n for side_1 in ('left', 'right'):\n for side_2 in ('left', 'right'):\n df[f'{side_1}_{point_1}_{side_2}_{point_2}'] = np.apply_along_axis(\n is_higher, 1, df[[\n f'{side_1}_{point_1}_y', f'{side_2}_{point_2}_y'\n ]].values)\n\n if drop_scores:\n columns = filter(lambda x: x.find('score') == -1, df.columns)\n df = df[columns]\n\n # print('Positional features added. 
DataFrame shape:', df.shape)\n\n return df", "def offline_plotly_scatter3d(df, x=0, y=1, z=-1):\n data = []\n # clusters = []\n colors = ['rgb(228,26,28)', 'rgb(55,126,184)', 'rgb(77,175,74)']\n\n # df.columns = clean_columns(df.columns)\n\n x = get_array(df, x, default=0)\n y = get_array(df, y, default=1)\n z = get_array(df, z, default=-1)\n for i in range(len(df['name'].unique())):\n name = df['Name'].unique()[i]\n color = colors[i]\n x = x[pd.np.array(df['name'] == name)]\n y = y[pd.np.array(df['name'] == name)]\n z = z[pd.np.array(df['name'] == name)]\n\n trace = dict(\n name=name,\n x=x, y=y, z=z,\n type=\"scatter3d\",\n mode='markers',\n marker=dict(size=3, color=color, line=dict(width=0)))\n data.append(trace)\n\n layout = dict(\n width=800,\n height=550,\n autosize=False,\n title='Iris dataset',\n scene=dict(\n xaxis=dict(\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n yaxis=dict(\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n zaxis=dict(\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n aspectratio=dict(x=1, y=1, z=0.7),\n aspectmode='manual'\n ),\n )\n\n fig = dict(data=data, layout=layout)\n\n # IPython notebook\n # plotly.iplot(fig, filename='pandas-3d-iris', validate=False)\n\n url = plotly.offline.plot(fig, filename='pandas-3d-iris', validate=False)\n return url", "def make_grid(dataset):\n top_left_lat = dataset[\"a\"][0]\n top_left_lng = dataset[\"a\"][1]\n top_right_lng = dataset[\"c\"][1]\n bot_left_lat = dataset[\"b\"][0]\n\n lng_row = []\n lat_col = []\n i = top_left_lng\n while i < top_right_lng:\n lng_row.append(round(i, 5))\n i += step\n j = bot_left_lat\n while j < top_left_lat:\n lat_col.append(round(j, 5))\n j += step\n out_grid = []\n for i in lat_col:\n row = []\n for j in lng_row:\n row.append(\"{0}:{1}:0\".format(i, j))\n out_grid.append(row)\n return out_grid", "def get_data(f, zoom_level, start_pos_1, end_pos_1, start_pos_2, end_pos_2):\n \n c = cooler.Cooler(f[str(zoom_level)])\n \n (chroms, chrom_sizes, chrom_cum_lengths) = get_chromosome_names_cumul_lengths(c)\n \n i0 = abs_coord_2_bin(c, start_pos_1, chroms, chrom_cum_lengths, chrom_sizes)\n i1 = abs_coord_2_bin(c, end_pos_1, chroms, chrom_cum_lengths, chrom_sizes)\n j0 = abs_coord_2_bin(c, start_pos_2, chroms, chrom_cum_lengths, chrom_sizes)\n j1 = abs_coord_2_bin(c, end_pos_2, chroms, chrom_cum_lengths, chrom_sizes)\n \n pixels = c.matrix(as_pixels=True, balance=False, max_chunk=np.inf)[i0:i1+1, j0:j1+1]\n \n if not len(pixels):\n return pd.DataFrame(columns=['genome_start1', 'genome_start2', 'balanced'])\n \n bins = c.bins()[['chrom', 'start', 'end', 'weight']]\n pixels = annotate(pixels, bins)\n\n pixels['genome_start1'] = chrom_cum_lengths[pixels['chrom1']] + pixels['start1']\n pixels['genome_start2'] = chrom_cum_lengths[pixels['chrom2']] + pixels['start2']\n pixels['balanced'] = (\n pixels['count'] * pixels['weight1'] * pixels['weight2']\n )\n \n return pixels[['genome_start1', 'genome_start2', 'balanced']]", "def run(tile_x, tile_y, zoom, mbtiles_file):\n conn = sqlite3.connect(mbtiles_file)\n c = conn.cursor()\n c.execute(\n (\"SELECT tile_data FROM tiles WHERE \"\n \"zoom_level=? AND tile_column=? 
AND tile_row=?\"),\n (zoom, tile_x, tile_y))\n mvt_content = c.fetchone()[0]\n return mvt_content", "def grid(self, (z, x, y)):\n # sources.py -> MapnikRenderer -> grid\n content = self.reader.grid(z, x, y, self.grid_fields, self.grid_layer)\n return content", "def exportPlayground(self, filepath):\n cellPositions = \"\"\n for cell in self.cells:\n if(cell.dead == False):\n cellPositions += str(cell.x) + \" \" + str(cell.y) + \"\\n\"\n \n fileWrite = open(filepath, \"w\")\n fileWrite.write(cellPositions)\n fileWrite.close()", "def add_tiles_to_cbf(cbf, tiles, verbose = False):\n array_names = []\n cbf.find_category(b\"diffrn_data_frame\")\n while True:\n try:\n cbf.find_column(b\"array_id\")\n array_names.append(cbf.get_value().decode())\n cbf.next_row()\n except Exception as e:\n assert \"CBF_NOTFOUND\" in str(e)\n break\n\n tileisint = flex.bool()\n for tilekey in sorted(tiles.keys()):\n assert len(tiles[tilekey].focus()) == 3\n if isinstance(tiles[tilekey],flex.int):\n tileisint.append(True)\n elif isinstance(tiles[tilekey],flex.double):\n tileisint.append(False)\n else:\n raise TypeError(\"Ints or doubles are required\")\n\n \"\"\" Data items in the ARRAY_STRUCTURE category record the organization and\n encoding of array data in the ARRAY_DATA category.\"\"\"\n cbf.add_category(\"array_structure\",[\"id\",\"encoding_type\",\"compression_type\",\"byte_order\"])\n for i, array_name in enumerate(array_names):\n if tileisint[i]:\n cbf.add_row([array_name,\"signed 32-bit integer\",\"packed\",\"little_endian\"])\n else:\n cbf.add_row([array_name,\"signed 64-bit real IEEE\",\"packed\",\"little_endian\"])\n\n \"\"\" Data items in the ARRAY_DATA category are the containers for the array data\n items described in the category ARRAY_STRUCTURE. \"\"\"\n cbf.add_category(\"array_data\",[\"array_id\",\"binary_id\",\"data\"])\n\n if verbose:\n print(\"Compressing tiles...\", end=' ')\n\n for i, (tilekey, array_name) in enumerate(zip(sorted(tiles.keys()), array_names)):\n focus = tiles[tilekey].focus()\n\n cbf.add_row([array_name,str(i+1)])\n\n binary_id = i+1\n data = tiles[tilekey].copy_to_byte_str()\n elements = len(tiles[tilekey])\n byteorder = b\"little_endian\"\n dimfast = focus[2]\n dimmid = focus[1]\n dimslow = focus[0]\n padding = 0\n\n if tileisint[i]:\n elsize = 4\n elsigned = 1\n\n cbf.set_integerarray_wdims_fs(\\\n pycbf.CBF_PACKED,\n binary_id,\n data,\n elsize,\n elsigned,\n elements,\n byteorder,\n dimfast,\n dimmid,\n dimslow,\n padding)\n else:\n elsize = 8\n\n cbf.set_realarray_wdims_fs(\\\n #pycbf.CBF_CANONICAL,\n pycbf.CBF_PACKED,\n binary_id,\n data,\n elsize,\n elements,\n byteorder,\n dimfast,\n dimmid,\n dimslow,\n padding)", "def to_cdo_grid(self, outfile):", "def generate_tile(self, tms_x, tms_y, tms_z, arguments):\n pass", "def get_tile(lat: float, lon: float, zoom: int) -> List:\n lat_rad = lat * math.pi / 180\n n = math.pow(2, zoom)\n col = n * ((lon + 180) / 360) # Column\n row = n * (1 - (math.log(math.tan(lat_rad) + 1 /\n math.cos(lat_rad)) / math.pi)) / 2 # Row\n\n return [int(col), int(row)]", "def save_pickle(self, filename):\n x, y, _ = self.get_coords_enu()\n cx, cy = self.get_centres_enu()\n coords = dict(x=x, y=y, cx=cx, cy=cy)\n pickle.dump(coords, open(filename, 'wb'))", "def getTile(self, x, y):#TODO Make this a dictionary, and make a pprint function\n o = {}\n for layer in self.layers.keys():\n o[layer] = str(self.layers[layer][x, y])\n return o", "def __create_xyz_points(raster, no_data=-9999):\n y, x = np.where(raster != no_data)\n z = np.extract(raster != 
no_data, raster)\n\n return x, y, z", "def process_columns(tup: tuple):\n column_name, data, source_name, data_type, quantiles = tup\n column = Column(column_name, data, source_name, data_type, quantiles)\n print(\"Processing column: \", column.get_long_name())\n column.quantile_histogram = QuantileHistogram(column.get_long_name(), column.ranks, column.size, quantiles)\n with open('cache/' + column.get_long_name() + '.pkl', 'wb') as output:\n pickle.dump(column, output, pickle.HIGHEST_PROTOCOL)", "def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles" ]
[ "0.6151705", "0.60504067", "0.5954003", "0.576589", "0.5702577", "0.5660171", "0.56481516", "0.5625405", "0.55865616", "0.5577034", "0.5532618", "0.55072516", "0.5442371", "0.542948", "0.5415182", "0.5410632", "0.54070646", "0.53605324", "0.5332602", "0.5332529", "0.5324701", "0.5306444", "0.5279124", "0.521914", "0.51857334", "0.51820284", "0.51386905", "0.51168895", "0.5113415", "0.51119965", "0.5110524", "0.50942993", "0.5093913", "0.50799537", "0.50713396", "0.50656265", "0.5038845", "0.5027227", "0.5019967", "0.50185037", "0.5017633", "0.50155723", "0.50101304", "0.50079095", "0.5000875", "0.49985194", "0.49959782", "0.4983955", "0.49828655", "0.49818707", "0.4975721", "0.49707785", "0.4959034", "0.4941037", "0.4937566", "0.49291047", "0.49261123", "0.49216217", "0.49180493", "0.49156898", "0.4913724", "0.49124748", "0.49048945", "0.49048945", "0.48965788", "0.4895841", "0.489405", "0.48930204", "0.4891637", "0.48909914", "0.48897892", "0.48834988", "0.4880929", "0.48796597", "0.48568797", "0.48499185", "0.48495325", "0.4847359", "0.4836181", "0.48361143", "0.48306638", "0.4821865", "0.48209935", "0.48205793", "0.48150045", "0.48115078", "0.48109257", "0.48107478", "0.48019856", "0.48004544", "0.47995168", "0.47943246", "0.47922182", "0.47915646", "0.4790979", "0.47889882", "0.47878993", "0.4786204", "0.47826764", "0.47811413" ]
0.7190305
0
add latitude/longitude values to a dataframe
def add_latlon(df):
    LLs = [num2deg(x,y,z) for x,y,z in zip(df['x'],df['y'],df['z'])]
    LLdf = pd.DataFrame.from_records(LLs,columns = ['latitude','longitude'])
    return pd.concat([df.reset_index(drop=True),LLdf],axis = 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_demo_location_history() -> geopandas.GeoDataFrame:\n np.random.seed(123)\n\n time = pd.date_range(start=datetime.fromtimestamp(1624241116), end=datetime.now(), freq=\"1min\").values\n\n center_point = (-36.875990410695394, 174.76398830024274)\n lat = np.random.normal(loc=center_point[0], scale=0.01, size=len(time))\n lon = np.random.normal(loc=center_point[1], scale=0.01, size=len(time))\n\n geometry = [Point(lon, lat) for lon, lat in zip(lon, lat)]\n return geopandas.GeoDataFrame(pd.DataFrame(dict(time=time, lat=lat, lon=lon)), geometry=geometry)", "def convert_to_geopandas(df):\n df['geometry'] = [Point(xy) for xy in zip(df.latitude, df.longitude)]\n crs = {'init': 'epsg:4326'}\n df = gpd.GeoDataFrame(df, crs=crs, geometry=df['geometry'])\n\n return df", "def add_ll(project_data):\n assert isinstance(project_data, pd.DataFrame)\n \n search = uszipcode.SearchEngine() #Set up SearchEngine() function from uszipcode\n location_list = list(project_data['Location']) #Get list of each report\n longitude_list = [] #Create list to store longitude\n latitude_list = [] #Create list to store latitude\n zip_list = [] #Create list to store zip code\n\n #Iterate through every location and update longitude, latitude, zip code lists\n for location in location_list:\n lo = (re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", location))[0] #Extract longitude from Location string\n la = (re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", location))[1] #Extract latitude from Location string\n zp = search.by_coordinates(float(la), float(lo), returns=1)[0].zipcode #Get zip code for coordinate\n longitude_list.append(lo)\n latitude_list.append(la)\n zip_list.append(zp)\n \n #Add the Longitude, Latitude, Zip Code data in new columns in dataframe\n project_data.insert(len(project_data.columns)-1, \"Longitude\", longitude_list, True)\n project_data.insert(len(project_data.columns)-1, \"Latitude\", latitude_list, True)\n project_data.insert(len(project_data.columns)-1, \"Zip\", zip_list, True)\n \n return project_data", "def createCoordTuples(data):\n data['xy'] = None\n for i, row in data.iterrows():\n data['xy'][i] = [np.round(row['geometry'].x, decimals=5), np.round(row['geometry'].y, decimals=5)]\n return data", "def _add_coordinate_data(self, df, geom_col):\n x = df.apply(self._get_coords,\n geom_col=geom_col,\n coord_type='x',\n axis=1)\n\n y = df.apply(self._get_coords,\n geom_col=geom_col,\n coord_type='y',\n axis=1)\n return x, y", "def map_coord_transformer(df, proj_string, lat_column_name, long_column_name):\n logging.info('Generating coordinate reference systems... 
')\n #generate coordinate reference system objects for details of how this works \n from_crs = pyproj.CRS.from_string(proj_string)\n from_proj = pyproj.Proj(from_crs)\n gps_proj = pyproj.Proj('epsg:4326')\n original_coordinates_to_latlong_obj = pyproj.Transformer.from_proj(from_proj, gps_proj)\n logging.info('Defining transformation functions...')\n def original_coordinates_to_latlong(adf):\n (lat,long) = original_coordinates_to_latlong_obj.transform(adf[lat_column_name], adf[long_column_name])\n return lat, long\n \n #apply converter to generate series\n logging.info('Converting coordinates...')\n latlong_series = df.apply(original_coordinates_to_latlong, axis=1)\n \n #get calculated values and put back into df.\n logging.info('Splitting series...')\n lat_series = latlong_series.copy().apply(lambda x: x[0])\n long_series = latlong_series.copy().apply(lambda x: x[1])\n \n #return the values as \n logging.info('Preparing to return calc_lat and calc_long...')\n df.loc[:,'calc_lat'] = lat_series.copy()\n df.loc[:,'calc_long'] = long_series.copy()\n \n return df", "def find_center_points(df, lat1, long1, lat2, long2):\n df['center_latitude'] = (df[lat1].values + df[long2].values) / 2\n df['center_longitude'] = (df[long1].values + df[lat2].values) / 2\n\n return df", "def add_odometer(df, lat, lon):\n import pandas as pd\n import math\n df_use = df.loc[:, [(lat), (lon)]]\n df_use['prev_LAT'] = df_use.loc[:, (lat)].shift(periods=1)\n df_use['prev_LON'] = df_use.loc[:, (lon)].shift(periods=1)\n df_use['distance2'] = df_use.apply(lambda row: haversine(row['prev_LAT'], row['prev_LON'], row[(lat)], row[(lon)]),\n axis=1)\n df_use = df_use.reset_index(drop=True)\n df_use.loc[:, 'distance'] = df_use.apply(lambda x: nanthing(x.distance2), axis=1)\n df_use['prev_dist'] = df_use.loc[:, 'distance'].shift(periods=1)\n df_use['odometer'] = df_use['distance'].cumsum()\n df_use['prevod'] = df_use.loc[:, 'odometer'].shift(periods=1)\n df_use['dif'] = df_use.apply(lambda x: x.odometer - x.prevod, axis=1)\n df_use['dif'] = df_use.apply(lambda x: nanthing(x.dif), axis=1)\n return (pd.merge(df, df_use.loc[:, [(lat), (lon), 'odometer', 'distance']], on=[(lat), (lon)]))", "def geolocate_address(self):\n self.geolocator = Nominatim(user_agent=\"fundaft\")\n\n # If latitude / longitude are missing, try to geocode them on the basis\n # of the address \n self.coords = [self.get_coords(address) if np.isnan(lat)\n else (lat, lon) for address, lat, lon in\n zip(self.df_ads['property_title'], \n self.df_ads['latitude'], \n self.df_ads['longitude'])]\n \n df = pd.DataFrame(self.coords, columns=['latitude', 'longitude'])\n \n # If new coordinates are not in Dublin, change to na again\n df = self.is_in_dublin(df)\n\n self.df_ads[[\"latitude\",\"longitude\"]] = df", "def add_loc_cols(df):\r\n\r\n\tdf['STATE'] = [int(i[1:3]) for i in df.gisjoin]\r\n\tdf['COUNTY'] = [int(i[4:7]) for i in df.gisjoin]\r\n\tdf['TRACT'] = [int(i[7:-4]) for i in df.gisjoin]\r\n\tdf['BLOCK'] = [int(i[-4:]) for i in df.gisjoin]\r\n\r\n\tif df.STATE[0] > 9:\r\n\t\traise Exception(\"Warning! 
Code might be incorrect for states with fips code > 9\")\r\n\r\n\treturn df", "def add_loc_ocean2df(df=None, LatVar='lat', LonVar='lon'):\n from geopandas.tools import sjoin\n # Get the shapes for the ocean\n featurecla = 'ocean'\n group = get_shapes4oceans(rtn_group=True, featurecla=featurecla)\n # Turn the dataframe into a geopandas dataframe\n gdf = geopandas.GeoDataFrame(\n df, geometry=geopandas.points_from_xy(df[LonVar], df[LatVar]))\n # Work out if any of the points are within the polygons\n pointInPolys = sjoin(gdf, group, how='left')\n # Check how many were assigned to a region\n Nnew = float(pointInPolys['name'].dropna().shape[0])\n N = float(df.shape[0])\n if N != Nnew:\n pstr = 'WARNING: Only {:.2f}% assigned ({} of {})'\n print(pstr.format((Nnew/N)*100, int(Nnew), int(N)))\n # Add the ocean assingnment back into the orginal dataframe\n df[featurecla] = pointInPolys['name'].values\n return df", "def wgs84_to_mercator(df, lon, lat):\n k = 6378137\n df[\"x\"] = df[lon] * (k * np.pi/180.0)\n df[\"y\"] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k\n return df", "def _point_in_mbr(self, df):\n if df.empty:\n return df\n df = df[(df[\"lat\"] >= self._min_lat) &\n (df[\"lat\"] <= self._max_lat) &\n (df[\"lon\"] >= self._min_lon) &\n (df[\"lon\"] <= self._max_lon)\n ]\n return df", "def _prepare_geocode_result(results):\n # Prepare the data for the DataFrame as a dict of lists\n d = defaultdict(list)\n index = []\n\n for i, s in iteritems(results):\n address, loc = s\n\n # loc is lat, lon and we want lon, lat\n if loc is None:\n p = Point()\n else:\n p = Point(loc[1], loc[0])\n\n if address is None:\n address = np.nan\n\n d['geometry'].append(p)\n d['address'].append(address)\n index.append(i)\n\n df = gpd.GeoDataFrame(d, index=index)\n df.crs = from_epsg(4326)\n\n return df", "def get_time_series_at_location(data, lat, lon, feature):\n\n ts = data.sel(lat=lat, lon=lon, method='nearest', drop=True).to_series()\n index = ts.index.get_level_values('time')\n values = ts.values\n\n return pd.DataFrame({'Date': index.values, feature: values})", "def build_turbine_loc(turbine_x, turbine_y):\n turbineLoc = pd.DataFrame({'x': turbine_x, 'y': turbine_y})\n return turbineLoc", "def add_shortest_route(df):\n\n df['gmaps_dist'] = df.apply(lambda row: gmaps.getTotDist((row['pick_lon'], row['pick_lat']), (row['drop_lon'], row['drop_lat'])), axis=1)\n df['gmaps_dur'] = df.apply(lambda row: gmaps.getTotDur((row['pick_lon'], row['pick_lat']), (row['drop_lon'], row['drop_lat'])), axis=1)", "def create_pseudo_epsg4326_coordinates(self):\n self.create_3d_coord_on_sphere(on_sphere=True)\n self.df_attributes['lat'] = 180*(pi/2 - np.arccos(self.df_attributes['coord_z']))/pi\n self.df_attributes['lon'] = 180*np.arctan2(self.df_attributes['coord_y'], self.df_attributes['coord_x'])/pi", "def build_geoseries(self, dataframe):\n geo_list = []\n with click.progressbar(dataframe.iterrows(), label='Pulling site plans and geographic title data', length=len(dataframe)) as d:\n for index, row in d:\n geo_list.append(self.map_property(row['linc']))\n\n geo_series = gpd.GeoSeries([Point(mark) for mark in geo_list], index=dataframe.index)\n\n return geo_series", "def map_points(df, lat_col='latitude', lon_col='longitude', zoom_start=11, \\\n plot_points=False, pt_radius=15, \\\n draw_heatmap=False, heat_map_weights_col=None, \\\n heat_map_weights_normalize=True, heat_map_radius=15):\n\n ## center map in the middle of points center in\n middle_lat = df[lat_col].median()\n middle_lon = df[lon_col].median()\n\n curr_map 
= folium.Map(location=[middle_lat, middle_lon],\n zoom_start=zoom_start)\n\n # add points to map\n if plot_points:\n for _, row in df.iterrows():\n folium.CircleMarker([row[lat_col], row[lon_col]],\n radius=pt_radius,\n popup=row['name'],\n fill_color=\"#3db7e4\", # divvy color\n ).add_to(curr_map)\n\n # add heatmap\n if draw_heatmap:\n # convert to (n, 2) or (n, 3) matrix format\n if heat_map_weights_col is None:\n cols_to_pull = [lat_col, lon_col]\n else:\n # if we have to normalize\n if heat_map_weights_normalize:\n df[heat_map_weights_col] = \\\n df[heat_map_weights_col] / df[heat_map_weights_col].sum()\n\n cols_to_pull = [lat_col, lon_col, heat_map_weights_col]\n\n stations = df[cols_to_pull].as_matrix()\n curr_map.add_children(plugins.HeatMap(stations, radius=heat_map_radius))\n\n return curr_map", "def split_df(data: pd.DataFrame, start: int, stop: int) -> pd.DataFrame:\n df = data.iloc[start:stop, :].copy()\n df['coord'] = df.apply(lambda x: f\"{x['stopLat']}, {x['stopLon']}\", axis=1)\n return df.sort_values('coord')", "def insert_row(self, row_value, index):\n row = pd.DataFrame(row_value, columns=['lat', 'long', 'alt', 'descr'])\n self.df = pd.concat([self.df.iloc[:index], row, self.df.iloc[index:]]).reset_index(drop=True)", "def _add_latlon(ds, n=50):\n\n nx = ncols(ds)\n ny = nrows(ds)\n src_crs = get_crs(ds)\n dst_crs = CRS(init='epsg:4326')\n idx_x = np.linspace(0, nx - 1, n, dtype=int)\n idx_y = np.linspace(0, ny - 1, n, dtype=int)\n xs = ds.x[idx_x]\n ys = ds.y[idx_y]\n xgrid, ygrid = np.meshgrid(xs, ys)\n lon, lat = rasterio.warp.transform(src_crs, dst_crs, xgrid.flatten(),\n ygrid.flatten())\n lon_sparse = np.empty((ny, nx))\n lat_sparse = np.empty((ny, nx))\n lon_sparse[:] = np.nan\n lat_sparse[:] = np.nan\n # idx_y needs to be a column vector\n lon_sparse[idx_y[:, None], idx_x] = np.array(lon).reshape((n, n))\n lat_sparse[idx_y[:, None], idx_x] = np.array(lat).reshape((n, n))\n ds.coords['lat'] = (('y', 'x'), lat_sparse)\n ds.coords['lon'] = (('y', 'x'), lon_sparse)", "def distance_coord(df):\n temp_list_distance=[]\n list_distance=[]\n for i in range(len(df)-1):\n coord1 = (df['lat'][i], df['lon'][i])\n coord2 = (df['lat'][i+1], df['lon'][i+1])\n dist = geopy.distance.geodesic(coord1, coord2).km\n temp_list_distance.append(dist)\n list_distance.append(sum(temp_list_distance)) \n return(list_distance)", "def geocode(df, col):\r\n pass", "def _add_location_id(df: pd.DataFrame):\n if CommonFields.LOCATION_ID in df.columns:\n raise ValueError(\"location_id already in DataFrame\")\n df[CommonFields.LOCATION_ID] = df[CommonFields.FIPS].apply(pipeline.fips_to_location_id)", "def insert_df_xy(\n df: DataFrame,\n name: str,\n ws: str = \"memory\",\n spatial_reference: int = 3857) -> None:\n fields = _df_to_fields(df, 2)\n rows = df.collect()\n insert_rows_xy(rows, name, fields, ws, spatial_reference)", "def lat_lng(row):\r\n lat = row[\"latitude\"]\r\n lng = row[\"longitude\"]\r\n n = int(lat/GRANULARITY)\r\n nlat_start = n * GRANULARITY\r\n nlat_end = nlat_start + GRANULARITY\r\n nlg=int(lng/GRANULARITY)\r\n nlng_start = nlg * GRANULARITY\r\n nlng_end = nlng_start + GRANULARITY\r\n latlng=[(nlat_start,nlng_start), (nlat_start,nlng_end), (nlat_end,nlng_end), (nlat_end,nlng_start)]\r\n return latlng", "def build_polling_location_txt(self):\n self.base_df['address_line'] = self.base_df.apply(\n lambda row: self.get_address_line(row['index'], row['address1'], row['address2'], row['city'],\n row['state'], row['zip_code']), axis=1)\n\n self.base_df['directions'] = 
self.base_df.apply(\n lambda row: self.get_directions(), axis=1)\n #\n self.base_df['hours'] = self.base_df.apply(\n lambda row: self.get_hours(row['index'],row['start_time'], row['end_time']), axis=1)\n\n self.base_df['photo_uri'] = self.base_df.apply(\n lambda row: self.get_photo_uri(), axis=1)\n\n self.base_df['hours_open_id'] = self.base_df.apply(\n lambda row: self.create_hours_open_id(row['index'], row['address1'], row['address2'], row['city'],\n row['state'], row['zip_code']), axis=1)\n\n self.base_df['is_drop_box'] = self.base_df.apply(\n lambda row: self.is_drop_box(), axis=1)\n\n self.base_df['is_early_voting'] = self.base_df.apply(\n lambda row: self.is_early_voting(), axis=1)\n\n self.base_df['latitude'] = self.base_df.apply(\n lambda row: self.get_latitude(), axis=1)\n\n self.base_df['longitude'] = self.base_df.apply(\n lambda row: self.get_longitude(), axis=1)\n\n self.base_df['latlng_source'] = self.base_df.apply(\n lambda row: self.get_latlng_source(), axis=1)\n\n self.base_df['id'] = self.base_df.apply(\n lambda row: self.create_id(row['index'], row['ocd_division'],row['address1'], row['address2'],\n row['city'], row['state'], row['zip_code']), axis=1)\n\n return self.base_df", "def raster_to_geodataframe(*a, **kw) -> gpd.GeoDataFrame:\n kw[\"geo\"] = True\n return raster_to_dataframe(*a, **kw)", "def from_xy(df, x_column, y_column, sr=4326):\r\n from .io.fileops import _from_xy\r\n return _from_xy(df=df, x_column=x_column,\r\n y_column=y_column, sr=sr)", "def at_df(self, df):\n result = self.at(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result", "def extractCoord(df):\n dfcol = df['Coord']\n for i in range(len(dfcol)):\n dfcol[i] = dfcol[i][6:-1]\n return df", "def finalize_dataframe(self, dataframe: DataFrame):\n # Drop duplicates (some geospatial datasets, like ZCTAs, include redundant rows)\n geo_names = {'geometry'}\n non_geo_names = set(dataframe.columns) - geo_names\n dataframe = dataframe.drop_duplicates(subset=non_geo_names, ignore_index=True)\n\n # Insert NAs for annotated row values to avoid outlier values like -999,999,999\n dataframe.loc[dataframe['annotation'].notnull(), 'value'] = ''\n dataframe['value'] = pd.to_numeric(dataframe['value'], errors='coerce')\n\n # Create year date column\n dataframe['date'] = pd.to_datetime(\n dataframe['year'].astype('string') + '-12-31', format='%Y-%m-%d'\n )\n\n # Rename and reorder columns\n names_csv = resource_string(__name__, 'resources/names.csv')\n csv_reader = reader(StringIO(names_csv.decode('utf-8')))\n next(csv_reader) # Skip header row\n names = dict(csv_reader) # type: ignore\n if self.geometry in ['points', 'polygons'] and (set(dataframe.columns) & geo_names):\n name_order = [*names.values(), *geo_names]\n else:\n name_order = list(names.values())\n dataframe = dataframe.rename(columns=names)[name_order]\n\n return dataframe", "def _reindex_spatial_data_to_regions(ds, df):\n\n # use vectorized indexing in xarray >= 0.10\n if LooseVersion(xr.__version__) > LooseVersion(\"0.9.999\"):\n\n lon_indexer = xr.DataArray(df.lon.values, dims=(\"reshape_index\",))\n lat_indexer = xr.DataArray(df.lat.values, dims=(\"reshape_index\",))\n\n return ds.sel(lon=lon_indexer, lat=lat_indexer)\n\n else:\n res = ds.sel_points(\"reshape_index\", lat=df.lat.values, lon=df.lon.values)\n\n return res", "def lat_lons(self):", "def load_geolocation_data():\n client = 
MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lkdata\"]\n data = pd.DataFrame(list(lk_collection.find()))\n data = data[[\"fields\"]]\n data = pd.concat([pd.DataFrame(data), pd.DataFrame(list(data[\"fields\"]))], axis=1).drop(\"fields\", 1)\n data[\"cca_2\"] = pd.to_numeric(data[\"cca_2\"])\n return data", "def get_grid_data(df):\n \n bools = (df['Longitude'] % 60 == 0) & (df['Year'] % 10 == 0)\n return df[bools]", "def add_distance_features(df_kek):\n df = pd.DataFrame([])\n df['distance'] = get_distance_vector(df_kek, 'latitude', 'longitude', 'del_latitude', 'del_longitude')\n df['distance_dest_from_center'] = get_distance_vector(df_kek, 'center_latitude', 'center_longitude',\n 'del_latitude', 'del_longitude')\n df['distance_start_from_center'] = get_distance_vector(df_kek, 'center_latitude', 'center_longitude',\n 'latitude', 'longitude')\n df['route_distance'] = df_kek.apply(lambda x: get_route_distance(x['route']), axis=1)\n df[df['route_distance'] == 0.0] = df['route_distance'].median()\n df = pd.concat([df, pd.get_dummies(df_kek['main_id_locality'], prefix='City')], axis=1)\n return df", "def point_in_mbr(df, min_lat, max_lat, min_lon, max_lon):\n df = df[(df[\"lat\"] >= min_lat) &\n (df[\"lat\"] <= max_lat) &\n (df[\"lon\"] >= min_lon) &\n (df[\"lon\"] <= max_lon)\n ]\n return df", "def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:", "def get_lon_lat(df, nombre, ruido=False):\n from pyproj import transform\n\n lat, lon = [], []\n for index, row in tqdm(df.iterrows()):\n lati, loni = [], []\n try:\n for pt in list(row[\"geometry\"].exterior.coords):\n lati.append(pt[1])\n loni.append(pt[0])\n except Exception as e:\n try:\n row.geometry = row.geometry.map(lambda x: x.convex_hull)\n for pt in list(row[\"geometry\"].exterior.coords):\n lati.append(pt[1])\n loni.append(pt[0])\n except Exception as e:\n try:\n lati.append(df.iloc[index].geometry.centroid.y)\n loni.append(df.iloc[index].geometry.centroid.x)\n except Exception as e:\n if not ruido:\n continue\n else:\n print(e)\n print(df.iloc[index].geometry.centroid)\n lat.append(sum(lati) / len(lati))\n lon.append(sum(loni) / len(loni))\n latnew, lonnew = [], []\n for la, lo in zip(lat, lon):\n o, a = transform(inProj, outProj, lo, la)\n if o != float(\"inf\") and a != float(\"inf\"):\n latnew.append(a)\n lonnew.append(o)\n return {nombre: {\"lon\": lonnew, \"lat\": latnew}}", "def create_geodata(x):\n list_len = len(x)\n pilot_log = pd.concat(x[i][['time','Cn0DbHz','svid','geometry']] for i in range(list_len))\n \n return pilot_log", "def from_df(df, address_column=\"address\", geocoder=None):\r\n from arcgis.geocoding import get_geocoders, geocode, batch_geocode\r\n if geocoder is None:\r\n geocoder = arcgis.env.active_gis._tools.geocoders[0]\r\n\r\n geoms = []\r\n if address_column in df.columns:\r\n # batch geocode addresses in the address column and use them as the geometry\r\n batch_size = geocoder.properties.locatorProperties.MaxBatchSize\r\n N = len(df)\r\n geoms = []\r\n for i in range(0, N, batch_size):\r\n start = i\r\n stop = i + batch_size if i + batch_size < N else N\r\n # print('Geocoding from ' + str(start) + ' to ' + str(stop))\r\n\r\n res = batch_geocode(list(df[start:stop][address_column]), geocoder=geocoder)\r\n for index in range(len(res)):\r\n address = df.ix[start + index, address_column]\r\n try:\r\n loc = res[index]['location']\r\n x 
= loc['x']\r\n y = loc['y']\r\n # self.ix[start + index, 'x'] = x\r\n # self.ix[start + index, 'y'] = y\r\n geoms.append(arcgis.geometry.Geometry({'x': x, 'y': y}))\r\n\r\n except:\r\n x, y = None, None\r\n try:\r\n loc = geocode(address, geocoder=geocoder)[0]['location']\r\n x = loc['x']\r\n y = loc['y']\r\n except:\r\n print('Unable to geocode address: ' + address)\r\n pass\r\n # self.ix[start + index, 'x'] = x\r\n # self.ix[start + index, 'y'] = y\r\n geoms.append(None)\r\n else:\r\n raise ValueError(\"Address column not found in dataframe\")\r\n\r\n return SpatialDataFrame(df, geometry=geoms)", "def generate_position_data(self):\n # populate 'Location' field randomly\n self.output['Location'] = np.random.choice(self.locations, self.obs)\n\n # clean up geodata data frame and create 'Position' attribute\n nc = self.geodata[['Lat', 'Lng', 'Elevation']].round(2)\n nc['Elevation'] = nc['Elevation'].astype(int)\n self.geodata['Position'] = nc.astype(\n str).apply(lambda x: ','.join(x), axis=1)\n self.geodata.drop(columns=['Lat', 'Lng', 'Elevation'], inplace=True)\n\n # update 'Position' column in output data frame\n left = self.output.set_index('Location') # set left index\n right = self.geodata.set_index('Location') # set right index\n self.output = left.loc[:, left.columns.union(right.columns)] # union\n self.output.update(right) # update self.output \"Position\" column\n self.output.reset_index(inplace=True)", "def broadcast_lonlat(ds, verbose=True):\n if 'lon' not in ds.variables:\n ds.coords['lon'] = ds['x']\n if 'lat' not in ds.variables:\n ds.coords['lat'] = ds['y']\n \n if len(ds['lon'].dims) < 2:\n ds.coords[\"lon\"] = ds[\"lon\"] * xr.ones_like(ds[\"lat\"])\n if len(ds['lat'].dims) < 2:\n ds.coords[\"lat\"] = xr.ones_like(ds[\"lon\"]) * ds[\"lat\"]\n return ds", "def add_latclonc_to_db(self):\n \n # add new columns\n try:\n command =\"ALTER TABLE {tb} ADD COLUMN latc TEXT\".format(tb=self.table_name) \n self.conn.cursor.execute(command)\n except:\n # pass if the column latc exists\n pass\n try:\n command =\"ALTER TABLE {tb} ADD COLUMN lonc TEXT\".format(tb=self.table_name) \n self.conn.cursor.execute(command)\n except:\n # pass if the column lonc exists\n pass\n\n # iterate through tvals of the self.sites\n sdtm = self.stm\n for ii, st in enumerate(self.sites):\n if ii == len(self.sites)-1:\n edtm = self.etm\n else:\n edtm = st.tval\n command = \"SELECT rowid, slist, vel, frang, rsep, datetime FROM {tb} WHERE (DATETIME(datetime)>'{sdtm}' and\\\n DATETIME(datetime)<='{edtm}') ORDER BY datetime\".format(tb=self.table_name,\\\n sdtm=str(sdtm), edtm=str(edtm))\n self.conn.cursor.execute(command)\n rows = self.conn.cursor.fetchall() \n if rows != []:\n rowid, slist, vel, frang_old, rsep_old, date_time_old = rows[0]\n\n # calculate latc_all and lonc_all in 'geo' coords\n latc_all, lonc_all = calc_latc_lonc(self.sites[ii], self.bmnum, frang_old, rsep_old, \n altitude=300., elevation=None, coord_alt=0.,\n coords=\"geo\", date_time=None)\n for row in rows:\n rowid, slist, vel, frang, rsep, date_time = row\n if (frang, rsep) != (frang_old, rsep_old):\n latc_all, lonc_all = calc_latc_lonc(self.sites[ii], self.bmnum, frang, rsep, \n altitude=300., elevation=None, coord_alt=0.,\n coords=\"geo\", date_time=None)\n\n \n frang_old, rsep_old = frang, rsep\n\n # convert from string to float\n slist = [int(float(x)) for x in slist.split(\",\")]\n vel = [float(x) for x in vel.split(\",\")]\n\n # exclude the slist values beyond maxgate and their correspinding velocities\n vel = [vel[i] for i in 
range(len(vel)) if slist[i] < st.maxgate]\n slist = [s for s in slist if s < st.maxgate]\n\n # extract latc and lonc values\n latc = [latc_all[s] for s in slist]\n lonc = [lonc_all[s] for s in slist]\n\n # convert to comma seperated text\n slist = \",\".join([str(x) for x in slist])\n vel = \",\".join([str(round(x,2)) for x in vel])\n latc = \",\".join([str(round(x,2)) for x in latc])\n lonc = \",\".join([str(round(x,2)) for x in lonc])\n\n # update the table\n command = \"UPDATE {tb} SET slist='{slist}', vel='{vel}',\\\n latc='{latc}', lonc='{lonc}' WHERE rowid=={rowid}\".\\\n format(tb=self.table_name, slist=slist, vel=vel,\\\n latc=latc, lonc=lonc, rowid=rowid)\n self.conn.cursor.execute(command)\n\n # update sdtm\n sdtm = edtm\n\n # commit the data into the db\n self.conn._commit()\n\n # close db connection\n self.conn._close_connection()\n \n return", "def df_add(df,index,column,value):\n\ttry:\n\t\tdf[column]\n\texcept:\n\t\tdf[column]=np.nan\n\ttry:\n\t\tdf.loc[index]\n\texcept:\n\t\tdf.loc[index]=np.nan\n\tdf.loc[index,column]=value\n\treturn df", "def coords_on_spherical_earth(self):\n self.create_3d_coord_on_sphere(on_sphere=True)\n self.df_attributes['coord_x_earth'] = 6371.009 * self.df_attributes['coord_x']\n self.df_attributes['coord_y_earth'] = 6371.009 * self.df_attributes['coord_y']\n self.df_attributes['coord_z_earth'] = 6371.009 * self.df_attributes['coord_z']", "def list_to_gdf (lis):\r\n gdf = gpd.GeoDataFrame(lis)\r\n # rename the column \r\n gdf.rename(columns ={0:\"geometry\"},inplace=True)\r\n # define crs to dataframe\r\n gdf.crs = {'init' :'epsg:{}'.format(4326)} \r\n gdf = gdf.to_crs(epsg = 4326)\r\n \r\n return gdf", "def _add_stops_to_df(self, stop_coords, signal_coords, route_df):\n\n self.stop_nn_indicies, self.stop_coord_nn = knn.find_knn(\n 1,\n route_df.geometry.values,\n stop_coords\n )\n\n\n signal_nn_indicies, singal_coord_nn = knn.find_knn(\n 1,\n route_df.geometry.values,\n signal_coords)\n\n route_df = route_df.assign(\n is_bus_stop = ([False] * len(route_df.index))\n )\n\n route_df = route_df.assign(\n is_signal = ([False] * len(route_df.index))\n )\n\n route_df = route_df.assign(\n is_stop = ([False] * len(route_df.index))\n )\n \n for i in self.stop_nn_indicies.ravel()[::3]:\n route_df.at[i, 'is_bus_stop'] = True\n route_df.at[i, 'is_stop'] = True\n \n for i in signal_nn_indicies.ravel()[::3]:\n route_df.at[i, 'is_stop'] = True\n route_df.at[i, 'is_signal'] = True\n\n # route_df.at[0, 'is_bus_stop'] = True\n # route_df.at[-1, 'is_bus_stop'] = True\n\n return route_df", "def set_coordinate(self):\n airqual_dictionary = self.realtime_data['stations'][0] #get the very first(recent) data/result\n self.latitude = airqual_dictionary['lat']\n self.longitude = airqual_dictionary['lng']", "def haversine_df(x:pd.core.frame.DataFrame):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(np.radians, [x.iloc[:,0], x.iloc[:,1], x.iloc[:,2], x.iloc[:,3]])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2\n c = 2 * np.arcsin(np.sqrt(a))\n r = 6371 # Radius of earth in kilometers\n return (c * r)", "def apply(data, options=default_options, config=default_config, warning=print):\n\n global CONFIG\n CONFIG = config\n\n global OPTIONS\n OPTIONS = options\n\n csv_keys = read_csv().keys()\n info = {\"latitude\":[],\"longitude\":[]}\n if not type(options[\"fields\"]) is str:\n raise Exception(f\"configured fields '{CONFIG['fields']}' is not valid\")\n 
options[\"fields\"] = options[\"fields\"].split(\",\")\n for field in options[\"fields\"]:\n if field not in csv_keys:\n raise Exception(f\"field '{field}' not found in repo '{CONFIG['csvrepo']}'\")\n info[field] = []\n if \"latitude\" not in data.columns or \"longitude\" not in data.columns:\n raise Exception(\"data must include 'latitude' and 'longitude' columns\")\n if options[\"geometry\"]:\n info[\"geometry\"] = []\n for index, row in data.iterrows():\n pos = get_position((row[\"latitude\"],row[\"longitude\"]))\n id = get_utility(pos)\n values = get_information(id)\n info[\"latitude\"].append(pos[0])\n info[\"longitude\"].append(pos[1])\n for key in options[\"fields\"]:\n info[key].extend(values[key].to_list())\n if options[\"geometry\"]:\n info[\"geometry\"].extend(get_geometry(id).to_list())\n result = pandas.DataFrame(info)\n return result", "def regrid_data(self, data_in):\n times = data_in.Times.values\n data_out = self.regridder(data_in)\n data_out = data_out.rename({'lat': 'XLAT', 'lon': 'XLONG'})\n data_out = data_out.rename({'x': 'west_east', 'y': 'south_north'})\n data_out['Times'] = ('Time', times)\n data_out['XLAT'] = (('Time', 'south_north', 'west_east'),\n np.repeat(np.expand_dims(data_out['XLAT'].values,\n axis=0),\n len(times), axis=0))\n data_out['XLONG'] = (('Time', 'south_north', 'west_east'),\n np.repeat(np.expand_dims(data_out['XLONG'].values,\n axis=0),\n len(times), axis=0))\n return data_out", "def gdf(self) -> gpd.GeoDataFrame:\n return self.just_geometry_gdf.join(self.df)", "def get_lat_lon():\r\n\r\n # Columns: dt,AverageTemperature,AverageTemperatureUncertainty,City,Country,Latitude,Longitude\r\n temperatures = pd.read_csv(\"GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv\")\r\n\r\n Latitude = temperatures['Latitude']\r\n Longitude = temperatures['Longitude']\r\n City = temperatures['City']\r\n Country = temperatures['Country']\r\n\r\n lat_array = []\r\n long_array = []\r\n cities_array = []\r\n countries_array = []\r\n tuples = []\r\n for i, j, city, country in zip(Latitude, Longitude, City, Country):\r\n if (i, j) not in tuples:\r\n tuples.append((i, j))\r\n lat_array.append(float(i[:-1]))\r\n long_array.append(float(j[:-1]))\r\n cities_array.append(city)\r\n countries_array.append(country)\r\n\r\n return lat_array, long_array, cities_array, countries_array", "def correct_lon(ds):\n ds = ds.copy()\n x = ds['x'].data\n ds['x'].data = np.where(x < 0 , 360 + x, x)\n\n lon = ds['lon'].data\n ds['lon'].data = np.where(lon < 0 , 360 + lon, lon)\n \n ds = ds.sortby('x')\n return ds", "def add_city_state_to_dataframe(dataframe):\n dataframe[['city', 'state']] = dataframe.apply(parse_city_state_from_row,\n axis=1)\n dataframe = dataframe[dataframe.state != \"NULL\"]\n return dataframe", "def _add_delta_times_to_df(self, route_df):\n\n \n\n route_df = route_df.assign(delta_times = self.delta_times)\n #route_df = route_df.assign(total_time = self.route_time)\n\n\n return route_df", "def centroid_gdf(gdf):\n df = gdf.copy()\n df['LAT'] = df['geometry'].centroid.apply(lambda p : p.coords[0][1])\n df['LONG'] = df['geometry'].centroid.apply(lambda p : p.coords[0][0])\n df = df.drop(columns=['geometry'])\n df = df.set_index('LOCATION')\n return df", "def testdata_gps():\n lon = np.array(\n [\n 4.54,\n 104.0,\n -14.9,\n 56.26,\n 103.46,\n 103.37,\n 54.22,\n 23.3,\n 25.53,\n 23.31,\n 118.0,\n 103.53,\n 54.40,\n 103.48,\n 6.14,\n 7.25,\n 2.38,\n 18.18,\n 103.54,\n 103.40,\n 28.59,\n 25.21,\n 29.35,\n 25.20,\n ]\n )\n\n lat = np.array(\n [\n 52.22,\n 1.14,\n 27.34,\n 
25.16,\n 1.16,\n 1.11,\n 24.30,\n 37.54,\n 37.26,\n 38.1,\n 24.25,\n 1.13,\n 24.49,\n 1.13,\n 42.33,\n 43.44,\n 39.34,\n 70.30,\n 1.16,\n 1.10,\n 40.58,\n 37.34,\n 41.18,\n 38.35,\n ]\n )\n\n time = np.zeros(len(lon))\n\n X_data = np.vstack((time, lat, lon)).T\n\n X_name = np.array([0, 1, 2, 2, 2, 2, 3, 3, 3]) # NOQA\n X_data = np.array(\n [\n (0, 42.727985, -73.683994), # MRC\n (0, 42.657872, -73.764148), # Home\n (0, 42.657414, -73.774448), # Park1\n (0, 42.658333, -73.770993), # Park2\n (0, 42.654384, -73.768919), # Park3\n (0, 42.655039, -73.769048), # Park4\n (0, 42.876974, -73.819311), # CP1\n (0, 42.862946, -73.804977), # CP2\n (0, 42.849809, -73.758486), # CP3\n ]\n )\n return X_name, X_data", "def nomenclatura():\n df = pd.read_csv(\"Data/nomenclatura_1.csv\", encoding = \"latin1\")\n #dict_axis = df.set_index('id').T.to_dict('list')\n dict_axis = dict( [ (i, [a,b]) for i, a,b in zip(df.id, df.latitude, df.longitude) ] )\n\n return dict_axis", "def geo_transform(self):\n pass", "def add_amenities(self): \n amenity_count = [self.search(lat, lon) for lat, lon in\n zip(self.df_ads['latitude'], self.df_ads['longitude'])]\n \n self.df_ads_mapdata = pd.concat(\n [self.df_ads.reset_index(drop=True), pd.DataFrame(amenity_count)], axis=1)\n\n assert len(self.df_ads_mapdata) == len(self.df_ads)", "def add_features(df):\n \n assert df.columns.str.contains(\"query|value|keyword|ranking|timestamp|geo\").all(), \"Add features failed. \\\n Missing one of [query, value, keyword, ranking, timestamp, geo]\"\n \n # feature engineering: totals and normalize\n grouped = df.groupby(['ranking']).value # group values by ranking\n df['value_total'] = grouped.transform('sum') # total sum \n df['value_normalized'] = (df.value-grouped.transform('min'))/(grouped.transform('max')-grouped.transform('min')) # normalize \n df['value_normalized_total'] = df.groupby(['ranking']).value_normalized.transform('sum') # total sum of normalized values \n df['date'] = pd.to_datetime(df.query_timestamp).dtd\n \n return df", "def get_rain_grid_coords(directory=\"rain_grid_coordinates\"):\n lon, lat = [pd.DataFrame([re.findall('..\\......', row[0]) for idx,\n row in pd.read_table(sys.path[0]+f\"/{directory}/{file}_center.txt\",\n header=None).iterrows()]) for file in ['lambda', 'phi']]\n coords = pd.DataFrame(columns={\"LAT\", \"LON\"})\n coords[\"LAT\"] = np.round(pd.Series([item for sublist in lat.values.tolist() for item in sublist]).astype(float), 4)\n coords[\"LON\"] = np.round(pd.Series([item for sublist in lon.values.tolist() for item in sublist]).astype(float), 4)\n coords[\"CELL_ID\"] = coords.index.values\n return coords", "def add_averages_to_map_dataframe(dataframe, map_dataframe):\n\n # List of boroughs\n boroughs = [\"BROOKLYN\", \"BRONX\", \"QUEENS\", \"MANHATTAN\", \"STATEN ISLAND\"]\n \n # Making a copy of the original map_dataframe\n map_dataframe_copy = map_dataframe.copy()\n \n # Renaming columns\n dataframe.columns = boroughs\n # Adding a new column named 'ZIPCODES' and its value will be the actual zipcodes from each borough\n dataframe['ZIPCODES'] = list(dataframe.index)\n \n # \n # Iterating through all columns (each column is a borough)\n # But a zipcode is only located in one of the 5 boroughs. 
So, one column will \n # hold a useful value while the other 4 will hold NaN\n # Dataframe looks like this:\n # BROOKLYN BRONX QUEENS MANHATTAN STATENISLAND\n # 11214 400k Nan NaN NaN NaN\n # 10303 NaN NaN NaN NaN 700k\n # We are finding that useful value and ignoring the NaN\n # Creating a list that looks like the following, however the zipcode is implicit:\n # AVERAGE\n # 400k (zipcode is 11214)\n # 700k (zipcode is 10303) \n averages = []\n for index, row in dataframe.iterrows():\n average = 0\n for bor in boroughs:\n if (not(math.isnan(row[bor]))):\n average = row[bor]\n averages.append(average)\n # Adding the values found for each zipcode as its own column in the dataframe\n dataframe['AVERAGES'] = averages\n \n # The ZIPCODE column on the dataframe might not exactly match the ZIPCODE column in the map_dataframe\n # (as they were taken from two different sources)\n # so, we need to match them up\n list_of_zipcodes_old = list(dataframe.index)\n list_of_zipcodes_new = list(map_dataframe_copy['ZIPCODE'])\n new_average_list = combine_lists(list_of_zipcodes_old, list_of_zipcodes_new, averages)\n\n # for zip in list_of_zipcodes_new:\n # if zip in list_of_zipcodes_old:\n # index = list_of_zipcodes_old.index(zip)\n # new_average_list.append(averages[index])\n # else:\n # new_average_list.append(0)\n \n # Dropping columns that are not needed (the boroughs)\n dataframe = dataframe.drop(boroughs, axis=1)\n\n # Adding a new column to the shape dataframe to hold the averages\n map_dataframe_copy['AVERAGES'] = new_average_list\n return map_dataframe_copy", "def prepare_for_influxdb(df):\n df = df.drop(columns=\"landkreis\", errors=\"ignore\") # prevent name collision in get_ags()\n df = get_ags(df)\n df[\"time\"] = df.apply(lambda x: 1000000000*int(datetime.timestamp((pd.to_datetime(x[\"timestamp\"])))), 1)\n df[\"measurement\"] = \"hystreet\"\n df[\"origin\"] = \"https://hystreet.com\"\n df = df.rename(columns={\n 'station_id': '_id',\n 'pedestrians_count': 'pedestrian_count',\n 'state': 'bundesland'\n })\n df['ags'] = pd.to_numeric(df['ags'])\n # import pdb; pdb.set_trace()\n return df", "def read_long_lat_proxi():\n session = Session()\n # data est une liste de tuple\n long_lat_proxi_data = session.query(Prix_Median.longitude,\n Prix_Median.latitude,\n Prix_Median.ocean_proximity_str,\n Prix_Median.ocean_proximity).all()\n session.close()\n list_long_lat = DataFrame(long_lat_proxi_data)\n list_long_lat = list_long_lat.drop_duplicates()\n return list_long_lat", "def create_table(f, geoinfo):\n bounds_cols = xb_points + yb_points\n df = pd.read_csv(f, delimiter=\";\", index_col=\"INDEX_RC\")\n df[duration_name] = parse_duration_level(f)\n df = df.join(geoinfo[[\"X_CENT_GEO\", \"Y_CENT_GEO\", \"Col\", \"Row\"]])\n df = df.rename(columns={\"Col\": x, \"Row\": y, \"X_CENT_GEO\": lon, \"Y_CENT_GEO\": lat})\n return df", "def populateNewFields(nadPoints):\n with arcpy.da.UpdateCursor(nadPoints,\n ['SHAPE@X', 'SHAPE@Y', 'longitude', 'latitude', 'Source'],\n spatial_reference=arcpy.SpatialReference(4326)) as cursor:\n for row in cursor:\n row[2] = row[0]\n row[3] = row[1]\n row[4] = 'Utah AGRC'\n cursor.updateRow(row)", "def get_lat_and_long(row):\r\n\tlatitude = row['latitude']\r\n\tlongitude = row['longitude']\r\n\treturn latitude, longitude", "def _wrangle(self):\n\n dataframe = self._cache.get_source(config.DATAFRAME_SONG_DATA)\n\n dataframe = dataframe \\\n .dropDuplicates(['artist_id']) \\\n .where(dataframe.artist_id != '') \\\n .select([\n 'artist_id',\n 'artist_name',\n 'artist_location',\n 
'artist_latitude',\n 'artist_longitude'\n ]) \\\n .withColumnRenamed('artist_name', 'name') \\\n .withColumnRenamed('artist_location', 'location') \\\n .withColumnRenamed('artist_latitude', 'latitude') \\\n .withColumnRenamed('artist_longitude', 'longitude')\n\n self._cache.set_source(config.DATAFRAME_ARTISTS, dataframe)", "def __insert_artist_data(cur, df):\n artist_data = (\n df.artist_id.values[0],\n df.artist_name.values[0],\n df.artist_location.values[0],\n (df.artist_latitude.values[0]).item(),\n (df.artist_longitude.values[0]).item()\n )\n cur.execute(artist_table_insert, artist_data)", "def is_in_dublin(self, df):\n Dublin = (53.346300, -6.263100)\n searchgrid = self.get_searchgrid(Dublin, distance=30)\n\n df.loc[df['latitude'] > searchgrid['North'],\n ['longitude', 'latitude']] = np.nan\n df.loc[df['latitude'] < searchgrid['South'],\n ['longitude', 'latitude']] = np.nan\n df.loc[df['longitude'] > searchgrid['East'],\n ['longitude', 'latitude']] = np.nan\n df.loc[df['longitude'] < searchgrid['West'],\n ['longitude', 'latitude']] = np.nan\n\n return(df)", "def set_location(self, lat, long):\n self._data['loc'] = [lat, long]", "def makeGeoDf(self, arr: dict):\n geometry_points = [Point(x, y) for x, y in zip(arr[\"X\"], arr[\"Y\"])]\n elevetions = arr[\"Z\"]\n df = gpd.GeoDataFrame(columns=[\"elevation\", \"geometry\"])\n df['elevation'] = elevetions\n df['geometry'] = geometry_points\n df = df.set_geometry(\"geometry\")\n df.set_crs(self.output_epsg, inplace=True)\n return df", "def get_places() -> DataFrame:\n df = pd.read_csv('./data/geoplaces2.csv', encoding='utf-8')\n\n # drop useless columns\n df.drop(columns=['the_geom_meter', 'name', 'address',\n 'city', 'state', 'country', 'fax',\n 'zip', 'url', 'accessibility', 'franchise',\n 'other_services'],\n inplace=True)\n\n # select categorical column names\n categorical_columns = [column for column in df.columns\n if df[column].dtype.name == 'object'\n if column not in ['userID', 'smoker']]\n\n # replace categorical columns with one hot encoding\n for column_name in categorical_columns:\n dummies = pd.get_dummies(df[column_name])\n\n for dummy_column_name in dummies.columns:\n df[column_name + \"_\" + dummy_column_name] = dummies[dummy_column_name]\n\n df.drop(columns=[column_name], inplace=True)\n\n categorical_columns = [column for column in df.columns if df[column].dtype.name == 'object']\n\n for column in categorical_columns:\n df[column] = df[column].astype('category')\n\n df_cuisine = get_place_secondary_df('cuisine', 'Rcuisine')\n df_payment = get_place_secondary_df('accepts', 'Rpayment')\n df_hours = get_place_hours()\n\n payment_columns = list(filter(lambda x: x.startswith(\"Raccepts_\"), df_payment.columns))\n\n # some restaurants don't have specified payment ... 
but why\n # left join payment options and set cash option\n new_df = df.merge(df_payment, on='placeID', how='left')\n new_df[payment_columns] = new_df[payment_columns].fillna(0)\n new_df['Raccepts_cash'] = 1\n\n # left join cuisines and fill missing values with 0\n new_df = new_df.merge(df_cuisine, on='placeID', how='left')\n cuisine_columns = list(filter(lambda x: \"Rcuisine\" in x, new_df.columns))\n new_df[cuisine_columns] = new_df[cuisine_columns].fillna(0)\n\n new_df = new_df.merge(df_hours, on='placeID', how='inner')\n\n return new_df", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def set_direction(turbineLoc, rotation_angle):\n theta = np.deg2rad(rotation_angle)\n R = np.matrix([[np.cos(theta), -np.sin(theta)],\n [np.sin(theta), np.cos(theta)]])\n\n xy = np.array([turbineLoc.x, turbineLoc.y])\n\n xy_rot = R * xy\n # return xy_rot\n #print(xy_rot)\n #print(xy_rot[0][0][0])\n df_return = turbineLoc.copy(deep=True)\n df_return['x'] = np.squeeze(np.asarray(xy_rot[0, :]))\n df_return['y'] = np.squeeze(np.asarray(xy_rot[1, :]))\n return df_return", "def get_coordinates(table, replace_columns=False, remove_nans=False):\n assert \"zip code\" in table.labels or ((\"city\" in table.labels or \"county\" in table.labels) and \"state\" in table.labels)\n ref = Table.read_table(pkg_resources.resource_filename(__name__, \"geodata/geocode_states.csv\"))\n\n index_name = \"\".join(table.labels) # Ensures that index can't possibly be one of the preexisting columns\n index_name += \" \"\n \n table = table.with_columns(index_name, np.arange(table.num_rows))\n lat = np.array([np.nan] * table.num_rows)\n lon = np.array([np.nan] * table.num_rows)\n unassigned = set(range(table.num_rows)) \n while len(unassigned) > 0:\n index = unassigned.pop()\n row = table.take(index).take(0)\n if \"zip code\" in table.labels:\n select = table.where(\"zip code\", row[\"zip code\"][0]).column(index_name)\n unassigned -= set(select)\n try:\n ref_lat, ref_lon = ref.where(\"zip\", int(row[\"zip code\"][0])).select(\"lat\", \"lon\").row(0)\n lat[select] = ref_lat\n lon[select] = ref_lon\n except IndexError:\n pass\n else:\n state_select = table.where(\"state\", row[\"state\"][0]).column(index_name)\n county_select = table.where(\"county\", row[\"county\"][0]).column(index_name) if \"county\" in table.labels else np.arange(table.num_rows)\n city_select = table.where(\"city\", row[\"city\"][0]).column(index_name) if \"city\" in table.labels else np.arange(table.num_rows)\n select = set.intersection(set(state_select), set(county_select), set(city_select))\n unassigned -= select\n select = list(select)\n try:\n matched_ref = ref.where(\"state\", row[\"state\"][0])\n if \"county\" in table.labels:\n matched_ref = matched_ref.where(\"county\", row[\"county\"][0].lower())\n if \"city\" in table.labels:\n matched_ref = matched_ref.where(\"city\", row[\"city\"][0].lower())\n ref_lat, ref_lon = matched_ref.select(\"lat\", \"lon\").row(0)\n lat[select] = ref_lat\n lon[select] = ref_lon\n except IndexError:\n pass\n table = table.with_columns(\"lat\", lat, \"lon\", lon)\n table = table.drop(index_name)\n if replace_columns:\n for label in [\"county\", \"city\", \"zip code\", \"state\"]:\n try:\n table = table.drop(label)\n except KeyError:\n pass\n if remove_nans: \n table = table.where(\"lat\", are.below(float(\"inf\"))) # NaNs are not considered to be smaller than infinity\n return table", "def prepare_spatial_weights_data(weights_file):\n\n df = pd.read_csv(weights_file)\n\n # Re-label out-of-bounds pixel centers\n 
df.set_value((df[\"pix_cent_x\"] == 180.125), \"pix_cent_x\", -179.875)\n\n # probably totally unnecessary\n df.drop_duplicates()\n df.index.names = [\"reshape_index\"]\n\n df.rename(columns={\"pix_cent_x\": \"lon\", \"pix_cent_y\": \"lat\"}, inplace=True)\n\n return df", "def vectorize(df):\n\tt = calc_affine(df)\n\ta = df.values\n\t# zeros an nan are left open space, means mask = True!\n\tmaske = (df != 0).fillna(True)\n\tgdf = gpd.GeoDataFrame()\n\tgeoms = []\n\tvalue = []\n\tfor s,v in rasterio.features.shapes(a,transform=t,mask=maske.values):\n\t\tgeoms.append(shape(s))\n\t\tvalue.append(v)\n\tgdf['geometry'] = geoms\n\tgdf = gdf.set_geometry('geometry')\n\tgdf['val']=value\n\treturn gdf", "def get_update_test_walk_line() -> gpd.GeoDataFrame:\n walk_proj = gpd.read_file('data/tests/test_walk_line.shp')\n walk_proj['length'] = [int(round(geom.length)) for geom in walk_proj['geometry']]\n walk_proj['time'] = [round((geom.length/1.33)/60, 1) for geom in walk_proj['geometry']]\n # walk_proj.to_file('data/test/test_walk_line.shp')\n return walk_proj", "def prepare_data(data, target, lon=\"x\", lat=\"y\"):\n # covariates = [col for col in data.columns if col not in [lon, lat, target]]\n # return data[covariates], data[target], data[[lon, lat]]\n return data.rename(\n columns={target: \"label\", lon: \"x_coord\", lat: \"y_coord\"}\n )", "def _add_accelerations_to_df(self, route_df, a_prof):\n # print(route_df.head())\n accelerations = self._calculate_acceleration(route_df, a_prof)\n\n #Assign acceleration values to new row in route DataFrame.\n route_df = route_df.assign(\n acceleration=accelerations\n )\n\n return route_df", "def set_coord_values(ds, wrf_out, footprint_nbins):\n xdim_var = ds.variables[\"dim_x\"]\n ydim_var = ds.variables[\"dim_y\"]\n xdim_bounds_var = ds.variables[\"dim_x_bnds\"]\n ydim_bounds_var = ds.variables[\"dim_y_bnds\"]\n lon_var = ds.variables[\"longitude\"]\n lat_var = ds.variables[\"latitude\"]\n\n time_back_var = ds.variables[\"time_before_observation\"]\n time_back_bounds_var = ds.variables[\"time_before_observation_bnds\"]\n\n height_var = ds.variables[\"height\"]\n height_bounds_var = ds.variables[\"height_bnds\"]\n\n dx = wrf_out[\"dx\"]\n\n xdim_data = wrf_out[\"proj_x_coord\"][0]\n ydim_data = wrf_out[\"proj_y_coord\"][0]\n xdim_var[:] = xdim_data[:]\n ydim_var[:] = ydim_data[:]\n\n xdim_bounds_var[:-1,:] = np.column_stack((xdim_data[:-1], xdim_data[1:]))\n xdim_bounds_var[-1,0] = xdim_data[-1]\n xdim_bounds_var[-1,1] = xdim_data[-1] + dx\n ydim_bounds_var[:-1,:] = np.column_stack((ydim_data[:-1], ydim_data[1:]))\n ydim_bounds_var[-1,0] = ydim_data[-1]\n ydim_bounds_var[-1,1] = ydim_data[-1] + dx\n\n wrf_lats = wrf_out[\"wrf_lat\"][0][0, :, :]\n wrf_lons = wrf_out[\"wrf_lon\"][0][0, :, :]\n lat_var[:, :] = wrf_lats[:, :]\n lon_var[:, :] = wrf_lons[:, :]\n\n ds.geospatial_lat_min = wrf_lats.min()\n ds.geospatial_lat_max = wrf_lats.max()\n ds.geospatial_lat_units = \"degree_north\"\n ds.geospatial_lon_min = wrf_lons.min()\n ds.geospatial_lon_max = wrf_lons.max()\n ds.geospatial_lon_units = \"degree_east\"\n\n time_back_vals = np.arange(0, footprint_nbins * FLUX_WINDOW, FLUX_WINDOW)\n time_back_var[:] = time_back_vals\n time_back_bounds_var[:-1,:] = np.column_stack((time_back_vals[:-1],\n time_back_vals[1:]))\n time_back_bounds_var[-1,:] = time_back_vals[-2:] + FLUX_WINDOW\n\n height_var[...] 
= 0\n height_bounds_var[:] = (0, CLOSE_TO_GROUND)", "def row_to_geojson(row, lon, lat):\n\n # Let pandas handle json serialization\n row_json = json.loads(row.to_json(date_format='epoch', date_unit='s'))\n return geojson.Feature(geometry=geojson.Point((row_json[lon], row_json[lat])),\n properties={key: row_json[key] for key in row_json.keys() if key not in [lon, lat]})", "def convert_GeoPandas_to_Bokeh_format(gdf):\r\n gdf_new = gdf.drop('geometry', axis=1).copy()\r\n gdf_new['x'] = gdf.apply(getGeometryCoords, \r\n geom='geometry', \r\n coord_type='x', \r\n shape_type='polygon', \r\n axis=1)\r\n \r\n gdf_new['y'] = gdf.apply(getGeometryCoords, \r\n geom='geometry', \r\n coord_type='y', \r\n shape_type='polygon', \r\n axis=1)\r\n \r\n return ColumnDataSource(gdf_new)", "def transform_geopandas(gdf, from_crs=None, to_crs=wgs84, inplace=False):\n from shapely.ops import transform\n import geopandas as gpd\n\n if from_crs is None:\n from_crs = check_crs(gdf.crs)\n else:\n from_crs = check_crs(from_crs)\n to_crs = check_crs(to_crs)\n\n if inplace:\n out = gdf\n else:\n out = gdf.copy()\n\n if isinstance(to_crs, pyproj.Proj) and isinstance(from_crs, pyproj.Proj):\n project = partial(transform_proj, from_crs, to_crs)\n elif isinstance(to_crs, Grid):\n project = partial(to_crs.transform, crs=from_crs)\n elif isinstance(from_crs, Grid):\n project = partial(from_crs.ij_to_crs, crs=to_crs)\n else:\n raise NotImplementedError()\n\n # Do the job and set the new attributes\n result = out.geometry.apply(lambda geom: transform(project, geom))\n result.__class__ = gpd.GeoSeries\n if isinstance(to_crs, pyproj.Proj):\n to_crs = to_crs.srs\n elif isinstance(to_crs, Grid):\n to_crs = None\n result.crs = to_crs\n out.geometry = result\n out.crs = to_crs\n out['min_x'] = [g.bounds[0] for g in out.geometry]\n out['max_x'] = [g.bounds[2] for g in out.geometry]\n out['min_y'] = [g.bounds[1] for g in out.geometry]\n out['max_y'] = [g.bounds[3] for g in out.geometry]\n return out", "def create_cols_distances(df):\n #create a column for haversine distance\n df['distance'] = haversine_array(df['pickup_longitude'], df['pickup_latitude'],\n df['dropoff_longitude'], df['dropoff_latitude'])\n\n df['manhattan_distance'] = dummy_manhattan_distance(df['pickup_longitude'], df['pickup_latitude'],\n df['dropoff_longitude'], df['dropoff_latitude'])\n\n df['bearing'] = bearing_array(df['pickup_longitude'], df['pickup_latitude'],\n df['dropoff_longitude'], df['dropoff_latitude'])\n\n return df", "def load_loss_GDF(filename, lon, lat):\n df = pd.read_csv(filename)\n x, y = np.meshgrid(lon, lat)\n coords = [Point(xval, yval) for xval, yval in zip(x.ravel(), y.ravel())]\n \n df['geometry'] = coords\n df = gpd.GeoDataFrame(df)\n df.crs = {'init': 'epsg:4326'}\n return df", "def add_cols_to_cleaned_df(df):\n\n core_cols = ['time','lat','lon','depth','year','month','week','dayofyear','float_id','cycle']\n template_cols = core_cols + bgc_data_columns\n template_df = pd.DataFrame(columns=template_cols)\n df = template_df.append(df)[template_cols]\n return df", "def convertLatLon(latCell, lonCell):\n cell_lats = np.array([])\n cell_lons = np.array([])\n for lat in latCell:\n cell_lats = np.append(cell_lats, lat * (180 / np.pi)) \n for lon in lonCell:\n cell_lons = np.append(cell_lons, lon * (180 / np.pi)) \n\n return cell_lats, cell_lons", "def iat_df(self, df):\n result = self.iat(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result", "def output_grid_information():\n # 
translate = [-74.26, 40.50]\n # scale = [0.02, 0.02]\n # step = 1\n\n translate = [0, 0]\n scale = [1, 1]\n step = 0.02\n\n lon_limits = [(-74.26 - translate[0]) / scale[0], (-73.76 - translate[0]) / scale[0]]\n lat_limits = [(40.48 - translate[1]) / scale[1], (40.94 - translate[1]) / scale[1]]\n\n lons = np.arange(lon_limits[0], lon_limits[1] - step, step)\n lats = np.arange(lat_limits[0], lat_limits[1] - step, step)\n\n all_json = {\n \"type\": \"FeatureCollection\"\n }\n\n gr_id = 0\n grid_df = pd.DataFrame(columns=['gr_id', 'c_lat', 'c_lon', 's_lon', 'w_lat', 'n_lon', 'e_lat'])\n features = []\n\n for lat in lats:\n for lon in lons:\n w_lon = lon\n e_lon = lon + step\n s_lat = lat\n n_lat = lat + step\n\n c_lon = lon + step / 2\n c_lat = lat + step / 2\n\n grid_df = grid_df.append(pd.DataFrame({\"gr_id\": [gr_id],\n \"c_lon\": [c_lon], \"c_lat\": [c_lat],\n \"w_lon\": [w_lon], \"s_lat\": [s_lat],\n \"e_lon\": [e_lon], \"n_lat\": [n_lat]}))\n\n coor = [[[s_lat, w_lon], [n_lat, w_lon], [n_lat, e_lon],\n [s_lat, e_lon], [s_lat, w_lon]]]\n\n feature = {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": coor\n },\n \"properties\": {\n \"id\": str(gr_id)\n }\n }\n\n features.append(feature)\n\n gr_id += 1\n\n all_json['features'] = features\n\n with open(BaseDir + '/grid.geojson', 'w') as f:\n json.dump(all_json, f)\n\n grid_df.to_csv(BaseDir + '/grid_locs.csv', index=False)", "def get_nx10(location):\n cols = ['id', 'acc_x', 'acc_y', 'acc_z', 'gy_x', 'gy_y', 'gy_z', 'mag_x', 'mag_y', 'mag_z'] \n array = np.genfromtxt(location, delimiter=\",\")\n df = DataFrame(array).dropna()\n df.columns = cols\n return(df)", "def coordinates_str(info_df: DataFrame, lat: float, lon: float) -> str:\n lat_precision = attribute_value(info_df, \"geospatial_lat_resolution\")\n lat_value = str(round_to(lat, lat_precision)).split(\".\")\n\n lat_str = (\n f\"[({lat_value[0]}.{lat_value[1][:2]}):1:({lat_value[0]}.{lat_value[1][:2]})]\"\n )\n\n lon_precision = attribute_value(info_df, \"geospatial_lon_resolution\")\n lon_value = str(round_to(lon, lon_precision)).split(\".\")\n\n lon_str = (\n f\"[({lon_value[0]}.{lon_value[1][:2]}):1:({lon_value[0]}.{lon_value[1][:2]})]\"\n )\n\n return lat_str + lon_str", "def data_from_dataframe(self, dataframe):\n self.dataframe = dataframe.drop_duplicates()\n #Convert numerical values into float type\n self.dataframe.apply(pandas.to_numeric, errors='ignore')\n #Convert timestamps into regular dates\n time_range = [datetime.datetime.fromtimestamp(time) for time in list(self.dataframe['time'])]\n beg = time_range[0]\n end = time_range[len(time_range)-1]\n #Attribute begining and ending dates\n self.beg = beg\n self.end = end" ]
[ "0.63746387", "0.63202995", "0.60851926", "0.6078423", "0.6073065", "0.60551715", "0.60244256", "0.59730685", "0.59513515", "0.5890462", "0.5884484", "0.58564293", "0.58458704", "0.58379817", "0.58235085", "0.5796834", "0.5791018", "0.5752113", "0.5700064", "0.5698395", "0.56887555", "0.5678902", "0.5663754", "0.56479895", "0.56412184", "0.56067526", "0.55637765", "0.5534452", "0.55316406", "0.55287206", "0.5521558", "0.5509337", "0.5506952", "0.5503666", "0.549836", "0.54758966", "0.547307", "0.5466339", "0.5433818", "0.54335546", "0.54334545", "0.54321915", "0.54214156", "0.54151076", "0.5413368", "0.54097605", "0.5398654", "0.5381583", "0.53693223", "0.5360816", "0.5355222", "0.5341851", "0.5337758", "0.53325385", "0.53249097", "0.5308095", "0.5303876", "0.5298525", "0.5266372", "0.5261523", "0.5253055", "0.5251266", "0.5245257", "0.5224836", "0.5222113", "0.52165", "0.52140844", "0.5210683", "0.5204914", "0.5194993", "0.51654243", "0.51604474", "0.51591456", "0.5157545", "0.515352", "0.5151316", "0.5150543", "0.514929", "0.5127859", "0.51275367", "0.5126518", "0.5126504", "0.51090044", "0.50989115", "0.5094434", "0.50840634", "0.5081688", "0.50764424", "0.5070731", "0.5063218", "0.505505", "0.50459373", "0.50423086", "0.504201", "0.50353485", "0.50315285", "0.50039077", "0.49984962", "0.49984467", "0.4994327" ]
0.79349303
0
This function creates output files of (x, y, z) tile coordinates that can be fed into download_tiles.sh or the save_tiles function to fetch tiles from the OSM server.
def basic_tileset(geo_dict, zooms, buffer=0, n_neg=None):
    if not len(geo_dict['elements']):
        raise ValueError("The query is empty - cannot continue!")
    if type(zooms) is int:
        zooms = [zooms]
    if any(z < 2 or z > 19 for z in zooms):
        raise ValueError("all zoom levels must be between 2 and 19")
    nodes = atomize_features(geo_dict)
    points_list = [(node['lat'], node['lon']) for node in nodes]
    pos_DFs, neg_DFs = [], []
    for zoom in zooms:
        zxy = [(zoom, *deg2num(x, y, zoom)) for x, y in points_list]
        pos_df = pd.DataFrame.from_records(zxy, columns=['z', 'x', 'y']) \
            .drop_duplicates(subset=['x', 'y'])
        num_neg = pos_df.shape[0] if n_neg is None else int(n_neg)
        neg_x, neg_y = sample_complement(pos_df['x'], pos_df['y'], num_neg, buffer)
        neg_df = pd.DataFrame({'z': zoom, 'x': neg_x, 'y': neg_y}).sort_values(by=['z', 'x', 'y'])
        pos_DFs.append(pos_df)
        neg_DFs.append(neg_df)
    out_pos = add_latlon(pd.concat(pos_DFs, axis=0))
    out_neg = add_latlon(pd.concat(neg_DFs, axis=0))
    common_row = pd.merge(out_pos, out_neg, on=['z', 'x', 'y']).shape[0]
    if common_row > 0:
        raise RuntimeError(f"Somehow there are {common_row} common rows!")
    return {'positive': out_pos, 'negative': out_neg}
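A minimal usage sketch for basic_tileset, assuming the helpers it calls (atomize_features, deg2num, sample_complement, add_latlon) and a save_tiles(df, output_dir) downloader are importable from the same module; the geo_dict below is a made-up Overpass-style result, and the module and file names are illustrative only.

from tiles_module import basic_tileset, save_tiles  # hypothetical module name

# Made-up Overpass-style query result with two nodes.
geo_dict = {
    "elements": [
        {"type": "node", "lat": 40.7580, "lon": -73.9855},
        {"type": "node", "lat": 40.7527, "lon": -73.9772},
    ]
}

tiles = basic_tileset(geo_dict, zooms=[15, 16], buffer=2)

# Write the (z, x, y) coordinates out for download_tiles.sh ...
tiles["positive"].to_csv("positive_tiles.csv", index=False)
tiles["negative"].to_csv("negative_tiles.csv", index=False)

# ... or fetch the positive tiles directly from the OSM tile server.
downloaded = save_tiles(tiles["positive"], output_dir="osm_tiles")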
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_metadata(self):\n if self.options.mbtiles:\n return\n if not os.path.exists(self.output):\n os.makedirs(self.output)\n\n if self.options.profile == 'mercator':\n\n south, west = self.mercator.MetersToLatLon( self.ominx, self.ominy)\n north, east = self.mercator.MetersToLatLon( self.omaxx, self.omaxy)\n south, west = max(-85.05112878, south), max(-180.0, west)\n north, east = min(85.05112878, north), min(180.0, east)\n self.swne = (south, west, north, east)\n\n # Generate googlemaps.html\n if self.options.webviewer in ('all','google') and self.options.profile == 'mercator':\n if not self.options.resume or not os.path.exists(os.path.join(self.output, 'googlemaps.html')):\n f = open(os.path.join(self.output, 'googlemaps.html'), 'w')\n f.write( self.generate_googlemaps() )\n f.close()\n\n # Generate openlayers.html\n if self.options.webviewer in ('all','openlayers'):\n if not self.options.resume or not os.path.exists(os.path.join(self.output, 'openlayers.html')):\n f = open(os.path.join(self.output, 'openlayers.html'), 'w')\n f.write( self.generate_openlayers() )\n f.close()\n\n elif self.options.profile == 'geodetic':\n\n west, south = self.ominx, self.ominy\n east, north = self.omaxx, self.omaxy\n south, west = max(-90.0, south), max(-180.0, west)\n north, east = min(90.0, north), min(180.0, east)\n self.swne = (south, west, north, east)\n\n # Generate openlayers.html\n if self.options.webviewer in ('all','openlayers'):\n if not self.options.resume or not os.path.exists(os.path.join(self.output, 'openlayers.html')):\n f = open(os.path.join(self.output, 'openlayers.html'), 'w')\n f.write( self.generate_openlayers() )\n f.close()\n\n elif self.options.profile in ['raster','gearth','garmin']:\n\n west, south = self.ominx, self.ominy\n east, north = self.omaxx, self.omaxy\n\n self.swne = (south, west, north, east)\n\n # Generate openlayers.html\n if self.options.webviewer in ('all','openlayers'):\n if not self.options.resume or not os.path.exists(os.path.join(self.output, 'openlayers.html')):\n f = open(os.path.join(self.output, 'openlayers.html'), 'w')\n f.write( self.generate_openlayers() )\n f.close()\n\n\n # Generate tilemapresource.xml.\n if (self.options.tile_format != 'hybrid' and self.options.profile != 'garmin'\n and (not self.options.resume or not os.path.exists(os.path.join(self.output, 'tilemapresource.xml')))):\n f = open(os.path.join(self.output, 'tilemapresource.xml'), 'w')\n f.write( self.generate_tilemapresource())\n f.close()", "def mbtiles(ctx, files, output, overwrite, title, description,\n layer_type, img_format, tile_size, zoom_levels, image_dump,\n num_workers, src_nodata, dst_nodata, resampling):\n output, files = resolve_inout(files=files, output=output,\n overwrite=overwrite)\n inputfile = files[0]\n\n logger = logging.getLogger('rio-mbtiles')\n\n with ctx.obj['env']:\n\n # Read metadata from the source dataset.\n with rasterio.open(inputfile) as src:\n\n validate_nodata(dst_nodata, src_nodata, src.profile.get('nodata'))\n base_kwds = {'dst_nodata': dst_nodata, 'src_nodata': src_nodata}\n\n if src_nodata is not None:\n base_kwds.update(nodata=src_nodata)\n\n if dst_nodata is not None:\n base_kwds.update(nodata=dst_nodata)\n\n # Name and description.\n title = title or os.path.basename(src.name)\n description = description or src.name\n\n # Compute the geographic bounding box of the dataset.\n (west, east), (south, north) = transform(\n src.crs, 'EPSG:4326', src.bounds[::2], src.bounds[1::2])\n\n # Resolve the minimum and maximum zoom levels for export.\n 
if zoom_levels:\n minzoom, maxzoom = map(int, zoom_levels.split('..'))\n else:\n zw = int(round(math.log(360.0 / (east - west), 2.0)))\n zh = int(round(math.log(170.1022 / (north - south), 2.0)))\n minzoom = min(zw, zh)\n maxzoom = max(zw, zh)\n\n logger.debug(\"Zoom range: %d..%d\", minzoom, maxzoom)\n\n # Parameters for creation of tile images.\n base_kwds.update({\n 'driver': img_format.upper(),\n 'dtype': 'uint8',\n 'nodata': 0,\n 'height': tile_size,\n 'width': tile_size,\n 'count': 3,\n 'crs': TILES_CRS})\n\n img_ext = 'jpg' if img_format.lower() == 'jpeg' else 'png'\n\n # Initialize the sqlite db.\n if os.path.exists(output):\n os.unlink(output)\n # workaround for bug here: https://bugs.python.org/issue27126\n sqlite3.connect(':memory:').close()\n\n conn = sqlite3.connect(output)\n cur = conn.cursor()\n cur.execute(\n \"CREATE TABLE tiles \"\n \"(zoom_level integer, tile_column integer, \"\n \"tile_row integer, tile_data blob);\")\n cur.execute(\n \"CREATE TABLE metadata (name text, value text);\")\n\n # Insert mbtiles metadata into db.\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"name\", title))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"type\", layer_type))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"version\", \"1.1\"))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"description\", description))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"format\", img_ext))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"bounds\", \"%f,%f,%f,%f\" % (west, south, east, north)))\n\n conn.commit()\n\n # Create a pool of workers to process tile tasks.\n pool = Pool(num_workers, init_worker,\n (inputfile, base_kwds, resampling), 100)\n\n # Constrain bounds.\n EPS = 1.0e-10\n west = max(-180 + EPS, west)\n south = max(-85.051129, south)\n east = min(180 - EPS, east)\n north = min(85.051129, north)\n\n # Initialize iterator over output tiles.\n tiles = mercantile.tiles(\n west, south, east, north, range(minzoom, maxzoom + 1))\n\n for tile, contents in pool.imap_unordered(process_tile, tiles):\n\n if contents is None:\n logger.info(\"Tile %r is empty and will be skipped\", tile)\n continue\n\n # MBTiles has a different origin than Mercantile/tilebelt.\n tiley = int(math.pow(2, tile.z)) - tile.y - 1\n\n # Optional image dump.\n if image_dump:\n img_name = '%d-%d-%d.%s' % (\n tile.x, tiley, tile.z, img_ext)\n img_path = os.path.join(image_dump, img_name)\n with open(img_path, 'wb') as img:\n img.write(contents)\n\n # Insert tile into db.\n cur.execute(\n \"INSERT INTO tiles \"\n \"(zoom_level, tile_column, tile_row, tile_data) \"\n \"VALUES (?, ?, ?, ?);\",\n (tile.z, tile.x, tiley, buffer(contents)))\n\n conn.commit()\n\n conn.close()\n # Done!", "def generate_test_data(root: str) -> str:\n size = (64, 64)\n folder_path = os.path.join(root, \"enviroatlas_lotp\")\n\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n for prefix in tile_list:\n for suffix, data_profile in layer_data_profiles.items():\n img_path = os.path.join(folder_path, f\"{prefix}_{suffix}.tif\")\n img_dir = os.path.dirname(img_path)\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n data_profile[\"profile\"][\"height\"] = size[0]\n data_profile[\"profile\"][\"width\"] = size[1]\n data_profile[\"profile\"][\"transform\"] = Affine(\n 1.0, 0.0, 608170.0, 0.0, -1.0, 3381430.0\n )\n\n write_data(\n img_path,\n 
data_profile[\"profile\"],\n data_profile[\"data_type\"],\n data_profile[\"vals\"],\n )\n\n # build the spatial index\n schema = {\n \"geometry\": \"Polygon\",\n \"properties\": {\n \"split\": \"str\",\n \"naip\": \"str\",\n \"nlcd\": \"str\",\n \"roads\": \"str\",\n \"water\": \"str\",\n \"waterways\": \"str\",\n \"waterbodies\": \"str\",\n \"buildings\": \"str\",\n \"lc\": \"str\",\n \"prior_no_osm_no_buildings\": \"str\",\n \"prior\": \"str\",\n },\n }\n with fiona.open(\n os.path.join(folder_path, \"spatial_index.geojson\"),\n \"w\",\n driver=\"GeoJSON\",\n crs=\"EPSG:3857\",\n schema=schema,\n ) as dst:\n for prefix in tile_list:\n img_path = os.path.join(folder_path, f\"{prefix}_a_naip.tif\")\n with rasterio.open(img_path) as f:\n geom = shapely.geometry.mapping(shapely.geometry.box(*f.bounds))\n geom = fiona.transform.transform_geom(\n f.crs.to_string(), \"EPSG:3857\", geom\n )\n\n row = {\n \"geometry\": geom,\n \"properties\": {\n \"split\": prefix.split(\"/\")[0].replace(\"_tiles-debuffered\", \"\")\n },\n }\n for suffix, data_profile in layer_data_profiles.items():\n key = suffix_to_key_map[suffix]\n row[\"properties\"][key] = f\"{prefix}_{suffix}.tif\"\n dst.write(row)\n\n # Create archive\n archive_path = os.path.join(root, \"enviroatlas_lotp\")\n shutil.make_archive(archive_path, \"zip\", root_dir=root, base_dir=\"enviroatlas_lotp\")\n shutil.rmtree(folder_path)\n md5: str = calculate_md5(archive_path + \".zip\")\n return md5", "def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))", "def generate_metadata(self):\n\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n\n if self.options.profile == 'mercator':\n\n south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)\n north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)\n south, west = max(-85.05112878, south), max(-180.0, west)\n north, east = min(85.05112878, north), min(180.0, east)\n self.swne = (south, west, north, east)\n\n # Generate googlemaps.html\n if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':\n if (not self.options.resume or not\n os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):\n with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:\n f.write(self.generate_googlemaps().encode('utf-8'))\n\n # Generate openlayers.html\n if self.options.webviewer in ('all', 'openlayers'):\n if (not self.options.resume or not\n os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):\n with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:\n f.write(self.generate_openlayers().encode('utf-8'))\n\n # Generate leaflet.html\n if self.options.webviewer in ('all', 'leaflet'):\n if (not self.options.resume or not\n os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):\n with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:\n f.write(self.generate_leaflet().encode('utf-8'))\n\n elif self.options.profile == 'geodetic':\n\n west, south = self.ominx, self.ominy\n east, north = 
self.omaxx, self.omaxy\n south, west = max(-90.0, south), max(-180.0, west)\n north, east = min(90.0, north), min(180.0, east)\n self.swne = (south, west, north, east)\n\n # Generate openlayers.html\n if self.options.webviewer in ('all', 'openlayers'):\n if (not self.options.resume or not\n os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):\n with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:\n f.write(self.generate_openlayers().encode('utf-8'))\n\n elif self.options.profile == 'raster':\n\n west, south = self.ominx, self.ominy\n east, north = self.omaxx, self.omaxy\n\n self.swne = (south, west, north, east)\n\n # Generate openlayers.html\n if self.options.webviewer in ('all', 'openlayers'):\n if (not self.options.resume or not\n os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):\n with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:\n f.write(self.generate_openlayers().encode('utf-8'))\n\n # Generate tilemapresource.xml.\n if not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'tilemapresource.xml')):\n with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:\n f.write(self.generate_tilemapresource().encode('utf-8'))\n\n if self.kml:\n # TODO: Maybe problem for not automatically generated tminz\n # The root KML should contain links to all tiles in the tminz level\n children = []\n xmin, ymin, xmax, ymax = self.tminmax[self.tminz]\n for x in range(xmin, xmax+1):\n for y in range(ymin, ymax+1):\n children.append([x, y, self.tminz])\n # Generate Root KML\n if self.kml:\n if (not self.options.resume or not\n os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):\n with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:\n f.write(generate_kml(\n None, None, None, self.tileext, self.tilesize, self.tileswne,\n self.options, children\n ).encode('utf-8'))", "def save_tile(x,y,z,fpath):\n UA = \"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/77.0\"\n tile_url = f\"https://{random.choice('abc')}.tile.openstreetmap.org/{z}/{x}/{y}.png\"\n # cmd = f\"wget --user-agent='please download' -O {fpath} {url}\"\n if os.path.exists(fpath):\n print(f\"Already have tile {fpath}!\")\n return 0\n if os.path.isdir(fpath):\n raise ValueError(f\"requested path {fpath} exists and is a directory!\")\n try:\n res = rq.get(\n url=tile_url,\n headers={'User-Agent': UA}\n )\n status = res.status_code\n if status == 200:\n with open(fpath,'wb') as of:\n of.write(res.content)\n return 0\n else:\n print(f\"Error: response {status} from server:\\n{res.reason}\")\n return status\n except Exception as e:\n print(f\"Error getting tile: {e}\")\n return 1", "def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n self.open_files.append(fname)", "def save_tiles(df,output_dir,namefunc = None):\n if not isinstance(df,pd.core.frame.DataFrame):\n raise TypeError(\"df must be a pandas DataFrame!\")\n if any(e not in df.columns for e in ('z','x','y')):\n raise ValueError(\"df must have columns x, y, and z\")\n if namefunc is None:\n def namefunc(x,y,z):\n return f'{z}_{x}_{y}.png'\n\n opath = os.path.abspath(os.path.expanduser(output_dir))\n 
Path(opath).mkdir(parents=True, exist_ok=True)\n L = df.shape[0]\n flocs = [''] * L\n for i,xyz in enumerate(zip(df['x'],df['y'],df['z'])):\n x,y,z = xyz\n print(f\"({i+1} of {L})...\")\n sleep(0.75)\n outloc = os.path.join(opath,namefunc(x,y,z))\n if save_tile(x,y,z,outloc) == 0:\n flocs[i] = outloc\n df = df.assign(file_loc = flocs)\n return df[df['file_loc'] != '']", "def save_tiles(self, tiles, output_dir):\n save_path = f\"{output_dir}/tiles.npy\"\n tiles_np = np.asarray(tiles)\n np.save(save_path, tiles_np)\n print(\"done saving .npy!\")", "def create_overview_tiles(tile_job_info, output_folder, options):\n mem_driver = gdal.GetDriverByName('MEM')\n tile_driver = tile_job_info.tile_driver\n out_driver = gdal.GetDriverByName(tile_driver)\n\n tilebands = tile_job_info.nb_data_bands + 1\n\n # Usage of existing tiles: from 4 underlying tiles generate one as overview.\n\n tcount = 0\n for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):\n tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]\n tcount += (1 + abs(tmaxx-tminx)) * (1 + abs(tmaxy-tminy))\n\n ti = 0\n\n if tcount == 0:\n return\n\n if not options.quiet:\n print(\"Generating Overview Tiles:\")\n\n progress_bar = ProgressBar(tcount)\n progress_bar.start()\n\n for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):\n tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]\n for ty in range(tmaxy, tminy - 1, -1):\n for tx in range(tminx, tmaxx + 1):\n\n ti += 1\n ytile = GDAL2Tiles.getYtile(ty, tz, options)\n tilefilename = os.path.join(output_folder,\n str(tz),\n #str(tx),\n #\"%s.%s\" % (ytile, tile_job_info.tile_extension))\n '{0:04d}'.format(tx) + \"_\" + '{0:04d}'.format(ytile) + \".\" + tile_job_info.tile_extension)\n\n if options.verbose:\n print(ti, '/', tcount, tilefilename)\n\n if options.resume and os.path.exists(tilefilename):\n if options.verbose:\n print(\"Tile generation skipped because of --resume\")\n else:\n progress_bar.log_progress()\n continue\n\n # Create directories for the tile\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,\n 2 * tile_job_info.tile_size, tilebands)\n # TODO: fill the null value\n dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,\n tilebands)\n\n # TODO: Implement more clever walking on the tiles with cache functionality\n # probably walk should start with reading of four tiles from top left corner\n # Hilbert curve\n\n children = []\n # Read the tiles and write them to query window\n for y in range(2 * ty, 2 * ty + 2):\n for x in range(2 * tx, 2 * tx + 2):\n minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]\n if x >= minx and x <= maxx and y >= miny and y <= maxy:\n ytile2 = GDAL2Tiles.getYtile(y, tz+1, options)\n dsquerytile = gdal.Open(\n os.path.join(output_folder, str(tz + 1),\n '{0:04d}'.format(x) + \"_\" + '{0:04d}'.format(ytile2) + \".\" + tile_job_info.tile_extension),\n #str(x), \"%s.%s\" % (ytile2, tile_job_info.tile_extension)),\n gdal.GA_ReadOnly)\n if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):\n tileposy = 0\n else:\n tileposy = tile_job_info.tile_size\n if tx:\n tileposx = x % (2 * tx) * tile_job_info.tile_size\n elif tx == 0 and x == 1:\n tileposx = tile_job_info.tile_size\n else:\n tileposx = 0\n dsquery.WriteRaster(\n tileposx, tileposy, tile_job_info.tile_size,\n tile_job_info.tile_size,\n dsquerytile.ReadRaster(0, 0,\n tile_job_info.tile_size,\n 
tile_job_info.tile_size),\n band_list=list(range(1, tilebands + 1)))\n children.append([x, y, tz + 1])\n\n scale_query_to_tile(dsquery, dstile, tile_driver, options,\n tilefilename=tilefilename)\n # Write a copy of tile to png/jpg\n if options.resampling != 'antialias':\n # Write a copy of tile to png/jpg\n out_driver.CreateCopy(tilefilename, dstile, strict=0)\n\n del dstile\n\n options.generatedFiles.append(tilefilename)\n # applyLegend(tilefilename, options.legendObj)\n\n if options.verbose:\n print(\"\\tbuild from zoom\", tz + 1,\n \" tiles:\", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),\n (2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))\n\n # # Create a KML file for this tile.\n # if tile_job_info.kml:\n # with open(os.path.join(\n # output_folder,\n # '%d/%d/%d.kml' % (tz, tx, ty)\n # ), 'wb') as f:\n # f.write(generate_kml(\n # tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,\n # get_tile_swne(tile_job_info, options), options, children\n # ).encode('utf-8'))\n\n if not options.verbose and not options.quiet:\n progress_bar.log_progress()", "def generate_base_tiles(self):\n\n if not self.options.quiet:\n print(\"Generating Base Tiles:\")\n\n if self.options.verbose:\n print('')\n print(\"Tiles generated from the max zoom level:\")\n print(\"----------------------------------------\")\n print('')\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n\n ds = self.warped_input_dataset\n tilebands = self.dataBandsCount + 1\n querysize = self.querysize\n\n if self.options.verbose:\n print(\"dataBandsCount: \", self.dataBandsCount)\n print(\"tilebands: \", tilebands)\n\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n ti = 0\n\n tile_details = []\n\n tz = self.tmaxz\n for ty in range(tmaxy, tminy-1, -1):\n for tx in range(tminx, tmaxx+1):\n\n ti += 1\n ytile = GDAL2Tiles.getYtile(ty, tz, self.options)\n tilefilename = os.path.join(\n self.output_folder, str(tz), '{0:04d}'.format(tx) + \"_\" + '{0:04d}'.format(ytile) + \".\" + self.tileext)\n if self.options.verbose:\n print(ti, '/', tcount, tilefilename)\n\n if self.options.resume and os.path.exists(tilefilename):\n if self.options.verbose:\n print(\"Tile generation skipped because of --resume\")\n continue\n\n # Create directories for the tile\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:3857\n b = self.mercator.TileBounds(tx, ty, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty, tz)\n\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n\n if self.options.profile in ('mercator', 'geodetic'):\n rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])\n\n # Pixel size in the raster covering query geo extent\n nativesize = wb[0] + wb[2]\n if self.options.verbose:\n print(\"\\tNative Extent (querysize\", nativesize, \"): \", rb, wb)\n\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n\n else: # 'raster' profile:\n\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels\n ysize = self.warped_input_dataset.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == 
tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty * tsize) - rysize\n\n wx, wy = 0, 0\n wxsize = int(rxsize/float(tsize) * self.tilesize)\n wysize = int(rysize/float(tsize) * self.tilesize)\n if wysize != self.tilesize:\n wy = self.tilesize - wysize\n\n # Read the source raster if anything is going inside the tile as per the computed\n # geo_query\n tile_details.append(\n TileDetail(\n tx=tx, ty=ytile, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,\n wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,\n )\n )\n\n conf = TileJobInfo(\n src_file=self.tmp_vrt_filename,\n nb_data_bands=self.dataBandsCount,\n output_file_path=self.output_folder,\n tile_extension=self.tileext,\n tile_driver=self.tiledriver,\n tile_size=self.tilesize,\n kml=self.kml,\n tminmax=self.tminmax,\n tminz=self.tminz,\n tmaxz=self.tmaxz,\n in_srs_wkt=self.in_srs_wkt,\n out_geo_trans=self.out_gt,\n ominy=self.ominy,\n is_epsg_4326=self.isepsg4326,\n options=self.options,\n )\n\n return conf, tile_details", "def generate_openlayers( self ):\n\n args = {}\n args['title'] = self.options.title\n args['googlemapskey'] = self.options.googlekey\n args['yahooappid'] = self.options.yahookey\n args['south'], args['west'], args['north'], args['east'] = self.swne\n args['minzoom'] = self.tminz\n args['maxzoom'] = self.tmaxz\n args['tilesize'] = self.tilesize\n args['tileformat'] = format_extension[self.image_output.format]\n if self.image_output.format == \"PNG\":\n args['has_alpha'] = 'true'\n else:\n args['has_alpha'] = 'false'\n args['publishurl'] = \"\" if self.options.url is None else self.options.url\n args['copyright'] = self.options.copyright\n if self.options.profile in ('raster', 'gearth'):\n args['rasterzoomlevels'] = self.tmaxz+1\n args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]\n\n s = \"\"\"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n <html xmlns=\"http://www.w3.org/1999/xhtml>\"\n <head>\n <title>%(title)s</title>\n <meta http-equiv='imagetoolbar' content='no'/>\n <style type=\"text/css\"> v\\:* {behavior:url(#default#VML);}\n html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }\n body { margin: 10px; background: #fff; }\n h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }\n #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }\n #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}\n #map { height: 95%%; border: 1px solid #888; }\n </style>\"\"\" % args\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n <script src='http://dev.virtualearth.net/mapcontrol/mapcontrol.ashx?v=6.1'></script>\n <script src='http://maps.google.com/maps?file=api&amp;v=2&amp;key=%(googlemapskey)s' type='text/javascript'></script>\n <script src=\"http://api.maps.yahoo.com/ajaxymap?v=3.0&amp;appid=%(yahooappid)s\"></script>\"\"\" % args\n\n s += \"\"\"\n <script src=\"http://www.openlayers.org/api/2.7/OpenLayers.js\" type=\"text/javascript\"></script>\n <script type=\"text/javascript\">\n var map;\n var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);\n var mapMinZoom = %(minzoom)s;\n var mapMaxZoom = %(maxzoom)s;\n\n // avoid pink tiles\n OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;\n OpenLayers.Util.onImageLoadErrorColor = 
\"transparent\";\n\n function init(){\"\"\" % args\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n var options = {\n controls: [],\n projection: new OpenLayers.Projection(\"EPSG:900913\"),\n displayProjection: new OpenLayers.Projection(\"EPSG:4326\"),\n units: \"m\",\n maxResolution: 156543.0339,\n maxExtent: new OpenLayers.Bounds(-20037508, -20037508, 20037508, 20037508.34)\n };\n map = new OpenLayers.Map('map', options);\n\n // create Google Mercator layers\n var gmap = new OpenLayers.Layer.Google(\"Google Streets\",\n { sphericalMercator: true, numZoomLevels: 20} );\n var gsat = new OpenLayers.Layer.Google(\"Google Satellite\",\n {type: G_SATELLITE_MAP, sphericalMercator: true, numZoomLevels: 20} );\n var ghyb = new OpenLayers.Layer.Google(\"Google Hybrid\",\n {type: G_HYBRID_MAP, sphericalMercator: true, numZoomLevels: 20});\n var gter = new OpenLayers.Layer.Google(\"Google Terrain\",\n {type: G_PHYSICAL_MAP, sphericalMercator: true, numZoomLevels: 20 });\n\n // create Virtual Earth layers\n OpenLayers.Layer.VirtualEarth.prototype.MAX_ZOOM_LEVEL=19;\n OpenLayers.Layer.VirtualEarth.prototype.RESOLUTIONS=OpenLayers.Layer.Google.prototype.RESOLUTIONS\n var veroad = new OpenLayers.Layer.VirtualEarth(\"Virtual Earth Roads\",\n {'type': VEMapStyle.Road, 'sphericalMercator': true, numZoomLevels: 20});\n var veaer = new OpenLayers.Layer.VirtualEarth(\"Virtual Earth Aerial\",\n {'type': VEMapStyle.Aerial, 'sphericalMercator': true, numZoomLevels: 20 });\n var vehyb = new OpenLayers.Layer.VirtualEarth(\"Virtual Earth Hybrid\",\n {'type': VEMapStyle.Hybrid, 'sphericalMercator': true});\n\n // create Yahoo layer\n var yahoo = new OpenLayers.Layer.Yahoo(\"Yahoo Street\",\n {'sphericalMercator': true});\n var yahoosat = new OpenLayers.Layer.Yahoo(\"Yahoo Satellite\",\n {'type': YAHOO_MAP_SAT, 'sphericalMercator': true});\n var yahoohyb = new OpenLayers.Layer.Yahoo(\"Yahoo Hybrid\",\n {'type': YAHOO_MAP_HYB, 'sphericalMercator': true});\n\n // create OSM/OAM layer\n var osm = new OpenLayers.Layer.TMS( \"OpenStreetMap\",\n \"http://tile.openstreetmap.org/\",\n { type: 'png', getURL: osm_getTileURL, displayOutsideMaxExtent: true,\n attribution: '<a href=\"http://www.openstreetmap.org/\">OpenStreetMap</a>'} );\n var oam = new OpenLayers.Layer.TMS( \"OpenAerialMap\",\n \"http://tile.openaerialmap.org/tiles/1.0.0/openaerialmap-900913/\",\n { type: 'png', getURL: osm_getTileURL } );\n\n // create TMS Overlay layer\n var tmsoverlay = new OpenLayers.Layer.TMS( \"TMS Overlay\", \"\",\n { // url: '', serviceVersion: '.', layername: '.',\n type: '%(tileformat)s', getURL: overlay_getTileURL, alpha: %(has_alpha)s,\n isBaseLayer: false\n });\n if (OpenLayers.Util.alphaHack() == false) { tmsoverlay.setOpacity(0.7); }\n\n map.addLayers([gmap, gsat, ghyb, gter, veroad, veaer, vehyb,\n yahoo, yahoosat, yahoohyb, osm, oam,\n tmsoverlay]);\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent( mapBounds.transform(map.displayProjection, map.projection ) );\n \"\"\" % args\n\n elif self.options.profile == 'geodetic':\n s += \"\"\"\n var options = {\n controls: [],\n projection: new OpenLayers.Projection(\"EPSG:4326\"),\n maxResolution: 0.703125,\n maxExtent: new OpenLayers.Bounds(-180, -90, 180, 90)\n };\n map = new OpenLayers.Map('map', options);\n\n layer = new OpenLayers.Layer.WMS( \"Blue Marble\",\n \"http://labs.metacarta.com/wms-c/Basic.py?\", {layers: 'satellite' } );\n map.addLayer(layer);\n wms = 
new OpenLayers.Layer.WMS( \"VMap0\",\n \"http://labs.metacarta.com/wms-c/Basic.py?\", {layers: 'basic', format: 'image/png' } );\n map.addLayer(wms);\n\n var tmsoverlay = new OpenLayers.Layer.TMS( \"TMS Overlay\", \"\",\n {\n serviceVersion: '.', layername: '.', alpha: %(has_alpha)s,\n type: '%(tileformat)s', getURL: overlay_getTileURL,\n isBaseLayer: false\n });\n map.addLayer(tmsoverlay);\n if (OpenLayers.Util.alphaHack() == false) { tmsoverlay.setOpacity(0.7); }\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent( mapBounds );\n \"\"\" % args\n\n elif self.options.profile in ('raster', 'gearth'):\n s += \"\"\"\n var options = {\n controls: [],\n maxExtent: new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s ),\n maxResolution: %(rastermaxresolution)f,\n numZoomLevels: %(rasterzoomlevels)d\n };\n map = new OpenLayers.Map('map', options);\n\n var layer = new OpenLayers.Layer.TMS( \"TMS Layer\",\"\",\n { url: '', serviceVersion: '.', layername: '.', alpha: %(has_alpha)s,\n type: '%(tileformat)s', getURL: overlay_getTileURL\n });\n map.addLayer(layer);\n map.zoomToExtent( mapBounds );\n \"\"\" % args\n\n\n s += \"\"\"\n map.addControl(new OpenLayers.Control.PanZoomBar());\n map.addControl(new OpenLayers.Control.MousePosition());\n map.addControl(new OpenLayers.Control.MouseDefaults());\n map.addControl(new OpenLayers.Control.KeyboardDefaults());\n }\n \"\"\" % args\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n function osm_getTileURL(bounds) {\n var res = this.map.getResolution();\n var x = Math.round((bounds.left - this.maxExtent.left) / (res * this.tileSize.w));\n var y = Math.round((this.maxExtent.top - bounds.top) / (res * this.tileSize.h));\n var z = this.map.getZoom();\n var limit = Math.pow(2, z);\n\n if (y < 0 || y >= limit) {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n } else {\n x = ((x %% limit) + limit) %% limit;\n return this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n }\n }\n\n function overlay_getTileURL(bounds) {\n var res = this.map.getResolution();\n var x = Math.round((bounds.left - this.maxExtent.left) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.map.getZoom();\n if (this.map.baseLayer.name == 'Virtual Earth Roads' || this.map.baseLayer.name == 'Virtual Earth Aerial' || this.map.baseLayer.name == 'Virtual Earth Hybrid') {\n z = z + 1;\n }\n if (mapBounds.intersectsBounds( bounds ) && z >= mapMinZoom && z <= mapMaxZoom ) {\n //console.log( this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type);\n return this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n } else {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n }\n }\n \"\"\" % args\n\n elif self.options.profile == 'geodetic':\n s += \"\"\"\n function overlay_getTileURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.map.getResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.map.getZoom();\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (mapBounds.intersectsBounds( bounds ) && z >= mapMinZoom && z <= mapMaxZoom) {\n // console.log( this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type);\n 
return this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n } else {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n }\n }\n \"\"\" % args\n\n elif self.options.profile in ('raster','gearth'):\n s += \"\"\"\n function overlay_getTileURL(bounds) {\n var res = this.map.getResolution();\n var x = Math.round((bounds.left - this.maxExtent.left) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.maxExtent.bottom) / (res * this.tileSize.h));\n var z = this.map.getZoom();\n if (x >= 0 && y >= 0) {\n return this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n } else {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n }\n }\n \"\"\" % args\n\n s += \"\"\"\n function getWindowHeight() {\n if (self.innerHeight) return self.innerHeight;\n if (document.documentElement && document.documentElement.clientHeight)\n return document.documentElement.clientHeight;\n if (document.body) return document.body.clientHeight;\n return 0;\n }\n\n function getWindowWidth() {\n if (self.innerWidth) return self.innerWidth;\n if (document.documentElement && document.documentElement.clientWidth)\n return document.documentElement.clientWidth;\n if (document.body) return document.body.clientWidth;\n return 0;\n }\n\n function resize() {\n var map = document.getElementById(\"map\");\n var header = document.getElementById(\"header\");\n var subheader = document.getElementById(\"subheader\");\n map.style.height = (getWindowHeight()-80) + \"px\";\n map.style.width = (getWindowWidth()-20) + \"px\";\n header.style.width = (getWindowWidth()-20) + \"px\";\n subheader.style.width = (getWindowWidth()-20) + \"px\";\n if (map.updateSize) { map.updateSize(); };\n }\n\n onresize=function(){ resize(); };\n\n </script>\n </head>\n <body onload=\"init()\">\n <div id=\"header\"><h1>%(title)s</h1></div>\n <div id=\"subheader\">Generated by <a href=\"https://github.com/mj10777/mapmbtiles\">MapMbTiles</a>/<a href=\"http://www.klokan.cz/projects/gdal2mbtiles/\">GDAL2MbTiles</a>, Copyright &copy; 2008 <a href=\"http://www.klokan.cz/\">Klokan Petr Pridal</a>, <a href=\"http://www.gdal.org/\">GDAL</a> &amp; <a href=\"http://www.osgeo.org/\">OSGeo</a> <a href=\"http://code.google.com/soc/\">GSoC</a>\n <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. 
THANK YOU -->\n </div>\n <div id=\"map\"></div>\n <script type=\"text/javascript\" >resize()</script>\n </body>\n </html>\"\"\" % args\n\n return s", "def generate_base_tiles(self):\n\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"NO\")\n\n print \"Generating Base Tiles:\"\n if self.options.verbose:\n #mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY\n #px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz)\n #print \"Pixel coordinates:\", px, py, (mx, my)\n print\n print \"Tiles generated from the max zoom level:\"\n print \"----------------------------------------\"\n print\n\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n querysize = self.querysize\n\n # Just the center tile\n #tminx = tminx+ (tmaxx - tminx)/2\n #tminy = tminy+ (tmaxy - tminy)/2\n #tmaxx = tminx\n #tmaxy = tminy\n\n #print tminx, tminy, tmaxx, tmaxy\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n #print tcount\n ti = 0\n i_y_column_count=((tmaxy-tminy)+1)\n ds = self.out_ds\n tz = self.tmaxz\n if self.options.verbose:\n # tx in range(tminx, tmaxx+1) tminx[ 281596 ] tmaxx[ 281744 ] ; ((tmaxx-tmaxy)+1) x_tiles[ 23393 ]\n print \"\\ttz=[\",tz,\"] : tx in range(tminx, tmaxx+1) tminx[\",tminx,\"] tmaxx[\",tmaxx,\"] ; ((tmaxx-tmaxy)+1) x_tiles[\",tcount,\"]\"\n # ty_tms in range(tmaxy, tminy-1, -1) tmaxy[ 352409 ] tminy[ 352253 ] ; ((tmaxy-tminy)) y_tiles[ 157 ] 352409-(352253-1)\n print \"\\ttz=[\",tz,\"] : ty_tms in range(tmaxy, tminy-1, -1) tmaxy[\",tmaxy,\"] tminy[\",tminy,\"] ; ((tmaxy-tminy+1)) y_tiles[\",i_y_column_count,\"]\"\n if self.options.resume:\n i_count = self.tile_exists(0, 0, tz,2)\n if i_count == tcount:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; x/y-tiles of z[\",tz,\"] y_tiles[\",tcount,\"]\"\n return\n for tx in range(tminx, tmaxx+1):\n tmaxy_work=tmaxy\n if self.options.resume:\n i_count = self.tile_exists(tx, 0, tz,3)\n if i_count == i_y_column_count:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n break\n else:\n if i_count > 0:\n # this assums the rows are compleate, which may NOT be true\n tmaxy_work-=i_count\n if self.options.verbose:\n print \"\\tTile generation skipped to tmaxy[\",tmaxy_work,\"] because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n for ty_tms in range(tmaxy_work, tminy-1, -1): #range(tminy, tmaxy+1):\n ty_osm=self.flip_y(tz,ty_tms)\n ty=ty_tms\n if self.options.tms_osm:\n ty=ty_osm\n if self.stopped:\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n break\n ti += 1\n\n if self.options.resume:\n exists = self.tile_exists(tx, ty, tz,0)\n if exists and self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; x =\",tx,\" ; y_tms =\",ty_tms, \"; y_osm =\",ty_osm\n else:\n exists = False\n\n if not exists:\n if self.options.verbose:\n print ti, '/', tcount, self.get_verbose_tile_name(tx, ty, tz)\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n if self.options.profile in ('mercator','geodetic'):\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:900913\n b = self.mercator.TileBounds(tx, ty_tms, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty_tms, tz)\n\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1])\n nativesize = wb[0]+wb[2] 
# Pixel size in the raster covering query geo extent\n if self.options.verbose:\n print \"\\tNative Extent (querysize\",nativesize,\"): \", rb, wb\n\n querysize = self.querysize\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n else: # 'raster' or 'gearth' or 'garmin' profile:\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.out_ds.RasterXSize # size of the raster in pixels\n ysize = self.out_ds.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize)\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty_tms == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty_tms * tsize) - rysize\n\n wx, wy = 0, 0\n\n wxsize, wysize = int(rxsize/float(tsize) * querysize), int(rysize/float(tsize) * querysize)\n if wysize != querysize:\n wy = querysize - wysize\n xyzzy = Xyzzy(querysize, rx, ry, rxsize, rysize, wx, wy, wxsize, wysize)\n try:\n if self.options.verbose:\n print ti,'/',tcount,' total ; z =',tz,' ; x =',tx,' ; y_tms =',ty_tms,' ; y_osm =',ty_osm\n print \"\\tReadRaster Extent: \", (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)\n self.write_base_tile(tx, ty, tz, xyzzy)\n except ImageOutputException, e:\n self.error(\"'%d/%d/%d': %s\" % (tz, tx, ty, e.message))\n\n if not self.options.verbose or self.is_subprocess:\n self.progressbar( ti / float(tcount) )\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None", "def compute_tiles(depc, gid_list, config=None):\n from os.path import abspath, join, relpath\n\n ibs = depc.controller\n\n tile_width = config['tile_width']\n tile_height = config['tile_height']\n tile_overlap = config['tile_overlap']\n tile_offset = config['tile_offset']\n allow_borders = config['allow_borders']\n keep_extern = config['keep_extern']\n\n if allow_borders:\n assert tile_offset == 0, 'Cannot use an offset with borders turned on'\n\n config_dict = dict(config)\n config_hashid = config.get_hashid()\n\n gpath_list = ibs.get_image_paths(gid_list)\n orient_list = ibs.get_image_orientation(gid_list)\n\n tile_size = (tile_width, tile_height)\n tile_size_list = [tile_size] * len(gid_list)\n tile_overlap_list = [tile_overlap] * len(gid_list)\n tile_offset_list = [tile_offset] * len(gid_list)\n\n tile_output_path = abspath(join(depc.cache_dpath, 'extern_tiles'))\n ut.ensuredir(tile_output_path)\n\n fmt_str = join(tile_output_path, 'tiles_gid_%d_w_%d_h_%d_ol_%d_os_%d_%s')\n output_path_list = [\n fmt_str\n % (\n gid,\n tile_width,\n tile_height,\n tile_overlap,\n tile_offset,\n config_hashid,\n )\n for gid in gid_list\n ]\n allow_border_list = [allow_borders] * len(gid_list)\n\n for output_path in output_path_list:\n ut.ensuredir(output_path)\n\n # Execute all tasks in parallel\n args_list = list(\n zip(\n gid_list,\n gpath_list,\n orient_list,\n tile_size_list,\n tile_overlap_list,\n tile_offset_list,\n output_path_list,\n allow_border_list,\n )\n )\n\n genkw = {\n 'ordered': True,\n 'chunksize': 256,\n 'progkw': {'freq': 50},\n # 'adjust': True,\n 'futures_threaded': True,\n 'force_serial': ibs.force_serial or config['force_serial'],\n }\n gen = ut.generate2(compute_tile_helper, args_list, nTasks=len(args_list), **genkw)\n for val in gen:\n parent_gid, output_path, 
tile_filepath_list, bbox_list, border_list = val\n\n if keep_extern:\n gids = ibs.add_images(\n tile_filepath_list,\n auto_localize=False,\n ensure_loadable=False,\n ensure_exif=False,\n )\n else:\n gids = ibs.add_images(tile_filepath_list)\n\n if ut.duplicates_exist(gids):\n flag_list = []\n seen_set = set()\n for gid in gids:\n if gid is None:\n flag = False\n else:\n flag = gid not in seen_set\n seen_set.add(gid)\n flag_list.append(flag)\n gids = ut.compress(gids, flag_list)\n bbox_list = ut.compress(bbox_list, flag_list)\n border_list = ut.compress(border_list, flag_list)\n\n num = len(gids)\n parent_gids = [parent_gid] * num\n config_dict_list = [config_dict] * num\n config_hashid_list = [config_hashid] * num\n\n ibs.set_tile_source(\n gids,\n parent_gids,\n bbox_list,\n border_list,\n config_dict_list,\n config_hashid_list,\n )\n\n if keep_extern:\n tile_relative_filepath_list_ = [\n relpath(tile_filepath, start=depc.cache_dpath)\n for tile_filepath in tile_filepath_list\n ]\n else:\n ut.delete(output_path)\n tile_relative_filepath_list_ = [None] * len(tile_filepath_list)\n\n yield tile_relative_filepath_list_, gids, num", "def to_xyz_tiles(\n self, root: str, tile_size: int, zoom_levels: list, driver=\"GTiff\", **kwargs\n ):\n mName = os.path.normpath(os.path.basename(root))\n\n def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n def tile_window(shape, px):\n \"\"\"Yield (left, upper, width, height).\"\"\"\n nr, nc = shape\n lu = product(range(0, nc, px), range(0, nr, px))\n\n ## create the window\n for l, u in lu:\n h = min(px, nr - u)\n w = min(px, nc - l)\n yield (l, u, w, h)\n\n vrt_fn = None\n prev = 0\n nodata = self.nodata\n obj = self._obj.copy()\n zls = {}\n for zl in zoom_levels:\n diff = zl - prev\n pxzl = tile_size * (2 ** (diff))\n\n # read data from previous zoomlevel\n if vrt_fn is not None:\n obj = xr.open_dataarray(vrt_fn, engine=\"rasterio\").squeeze(\n \"band\", drop=True\n )\n x_dim, y_dim = obj.raster.x_dim, obj.raster.y_dim\n obj = obj.chunk({x_dim: pxzl, y_dim: pxzl})\n dst_res = abs(obj.raster.res[-1]) * (2 ** (diff))\n\n if pxzl > min(obj.shape):\n logger.warning(\n f\"Tiles at zoomlevel {zl} smaller than tile_size {tile_size}\"\n )\n\n # Write the raster paths to a text file\n sd = join(root, f\"{zl}\")\n create_folder(sd)\n txt_path = join(sd, \"filelist.txt\")\n file = open(txt_path, \"w\")\n\n for l, u, w, h in tile_window(obj.shape, pxzl):\n col = int(np.ceil(l / pxzl))\n row = int(np.ceil(u / pxzl))\n ssd = join(sd, f\"{col}\")\n\n create_folder(ssd)\n\n # create temp tile\n temp = obj[u : u + h, l : l + w]\n if zl != 0:\n temp = temp.coarsen(\n {x_dim: 2**diff, y_dim: 2**diff}, boundary=\"pad\"\n ).mean()\n temp.raster.set_nodata(nodata)\n\n if driver == \"netcdf4\":\n path = join(ssd, f\"{row}.nc\")\n temp = temp.raster.gdal_compliant()\n temp.to_netcdf(path, engine=\"netcdf4\", **kwargs)\n elif driver in gis_utils.GDAL_EXT_CODE_MAP:\n ext = gis_utils.GDAL_EXT_CODE_MAP.get(driver)\n path = join(ssd, f\"{row}.{ext}\")\n temp.raster.to_raster(path, driver=driver, **kwargs)\n else:\n raise ValueError(f\"Unkown file driver {driver}\")\n\n file.write(f\"{path}\\n\")\n\n del temp\n\n file.close()\n # Create a vrt using GDAL\n vrt_fn = join(root, f\"{mName}_zl{zl}.vrt\")\n gis_utils.create_vrt(vrt_fn, file_list_path=txt_path)\n prev = zl\n zls.update({zl: float(dst_res)})\n del obj\n\n # Write a quick data catalog yaml\n yml = {\n \"crs\": self.crs.to_epsg(),\n \"data_type\": \"RasterDataset\",\n \"driver\": \"raster\",\n 
\"path\": f\"{mName}_zl{{zoom_level}}.vrt\",\n \"zoom_levels\": zls,\n }\n with open(join(root, f\"{mName}.yml\"), \"w\") as f:\n yaml.dump({mName: yml}, f, default_flow_style=False, sort_keys=False)", "def generate_openlayers(self):\n\n args = {}\n args['title'] = self.options.title\n args['bingkey'] = self.options.bingkey\n args['south'], args['west'], args['north'], args['east'] = self.swne\n args['minzoom'] = self.tminz\n args['maxzoom'] = self.tmaxz\n args['tilesize'] = self.tilesize\n args['tileformat'] = self.tileext\n args['publishurl'] = self.options.url\n args['copyright'] = self.options.copyright\n if self.options.tmscompatible:\n args['tmsoffset'] = \"-1\"\n else:\n args['tmsoffset'] = \"\"\n if self.options.profile == 'raster':\n args['rasterzoomlevels'] = self.tmaxz+1\n args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]\n\n s = r\"\"\"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n <html xmlns=\"http://www.w3.org/1999/xhtml\"\n <head>\n <title>%(title)s</title>\n <meta http-equiv='imagetoolbar' content='no'/>\n <style type=\"text/css\"> v\\:* {behavior:url(#default#VML);}\n html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }\n body { margin: 10px; background: #fff; }\n h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }\n #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }\n #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}\n #map { height: 95%%; border: 1px solid #888; }\n .olImageLoadError { display: none; }\n .olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }\n </style>\"\"\" % args # noqa\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n <script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>\n \"\"\" % args\n\n s += \"\"\"\n <script src=\"http://www.openlayers.org/api/2.12/OpenLayers.js\"></script>\n <script>\n var map;\n var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);\n var mapMinZoom = %(minzoom)s;\n var mapMaxZoom = %(maxzoom)s;\n var emptyTileURL = \"http://www.maptiler.org/img/none.png\";\n OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;\n\n function init(){\"\"\" % args\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n var options = {\n div: \"map\",\n controls: [],\n projection: \"EPSG:3857\",\n displayProjection: new OpenLayers.Projection(\"EPSG:4326\"),\n numZoomLevels: 20\n };\n map = new OpenLayers.Map(options);\n\n // Create Google Mercator layers\n var gmap = new OpenLayers.Layer.Google(\"Google Streets\",\n {\n type: google.maps.MapTypeId.ROADMAP,\n sphericalMercator: true\n });\n var gsat = new OpenLayers.Layer.Google(\"Google Satellite\",\n {\n type: google.maps.MapTypeId.SATELLITE,\n sphericalMercator: true\n });\n var ghyb = new OpenLayers.Layer.Google(\"Google Hybrid\",\n {\n type: google.maps.MapTypeId.HYBRID,\n sphericalMercator: true\n });\n var gter = new OpenLayers.Layer.Google(\"Google Terrain\",\n {\n type: google.maps.MapTypeId.TERRAIN,\n sphericalMercator: true\n });\n\n // Create Bing layers\n var broad = new OpenLayers.Layer.Bing({\n name: \"Bing Roads\",\n key: \"%(bingkey)s\",\n type: \"Road\",\n sphericalMercator: true\n });\n var baer = new OpenLayers.Layer.Bing({\n name: \"Bing Aerial\",\n key: \"%(bingkey)s\",\n type: \"Aerial\",\n sphericalMercator: true\n });\n var bhyb = new OpenLayers.Layer.Bing({\n name: \"Bing 
Hybrid\",\n key: \"%(bingkey)s\",\n type: \"AerialWithLabels\",\n sphericalMercator: true\n });\n\n // Create OSM layer\n var osm = new OpenLayers.Layer.OSM(\"OpenStreetMap\");\n\n \"\"\" % args # noqa\n\t\t \n if self.options.xyz:\n s += \"\"\"\t\t \n // create TMS Overlay layer\n var tmsoverlay = new OpenLayers.Layer.XYZ(\"XYZ Overlay\",\n \"${z}/${x}/${y}.png\", {\n transitionEffect: 'resize',\n isBaseLayer: false\n });\n\t\t\t\t \n\t\t \"\"\" % args # noqa\n else:\n s += \"\"\"\t\t \n // create TMS Overlay layer\n var tmsoverlay = new OpenLayers.Layer.TMS(\"TMS Overlay\", \"\",\n {\n serviceVersion: '.',\n layername: '.',\n alpha: true,\n type: '%(tileformat)s',\n isBaseLayer: false,\n getURL: getURL\n });\n\t\t\t\t \n\t\t \"\"\" % args # noqa\t\t \n\t\t \n s += \"\"\" \n if (OpenLayers.Util.alphaHack() == false) {\n tmsoverlay.setOpacity(0.7);\n }\n\n map.addLayers([gmap, gsat, ghyb, gter,\n broad, baer, bhyb,\n osm, tmsoverlay]);\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));\n \"\"\" % args # noqa\n\n elif self.options.profile == 'geodetic':\n s += \"\"\"\n var options = {\n div: \"map\",\n controls: [],\n projection: \"EPSG:4326\"\n };\n map = new OpenLayers.Map(options);\n\n var wms = new OpenLayers.Layer.WMS(\"VMap0\",\n \"http://tilecache.osgeo.org/wms-c/Basic.py?\",\n {\n layers: 'basic',\n format: 'image/png'\n }\n );\n var tmsoverlay = new OpenLayers.Layer.TMS(\"TMS Overlay\", \"\",\n {\n serviceVersion: '.',\n layername: '.',\n alpha: true,\n type: '%(tileformat)s',\n isBaseLayer: false,\n getURL: getURL\n });\n if (OpenLayers.Util.alphaHack() == false) {\n tmsoverlay.setOpacity(0.7);\n }\n\n map.addLayers([wms,tmsoverlay]);\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent(mapBounds);\n \"\"\" % args # noqa\n\n elif self.options.profile == 'raster':\n s += \"\"\"\n var options = {\n div: \"map\",\n controls: [],\n maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),\n maxResolution: %(rastermaxresolution)f,\n numZoomLevels: %(rasterzoomlevels)d\n };\n map = new OpenLayers.Map(options);\n\n var layer = new OpenLayers.Layer.TMS(\"TMS Layer\", \"\",\n {\n serviceVersion: '.',\n layername: '.',\n alpha: true,\n type: '%(tileformat)s',\n getURL: getURL\n });\n\n map.addLayer(layer);\n map.zoomToExtent(mapBounds);\n \"\"\" % args # noqa\n\n s += \"\"\"\n map.addControls([new OpenLayers.Control.PanZoomBar(),\n new OpenLayers.Control.Navigation(),\n new OpenLayers.Control.MousePosition(),\n new OpenLayers.Control.ArgParser(),\n new OpenLayers.Control.Attribution()]);\n }\n \"\"\" % args\n\n if self.options.profile == 'mercator' and self.options.xyz is None:\n s += \"\"\"\n function getURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.getServerResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.getServerZoom();\n if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {\n z+=1;\n }\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (OpenLayers.Util.isArray(url)) {\n url = this.selectUrl(path, url);\n }\n if 
(mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {\n return url + path;\n } else {\n return emptyTileURL;\n }\n }\n \"\"\" % args # noqa\n\n elif self.options.profile == 'geodetic':\n s += \"\"\"\n function getURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.getServerResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.getServerZoom()%(tmsoffset)s;\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (OpenLayers.Util.isArray(url)) {\n url = this.selectUrl(path, url);\n }\n if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {\n return url + path;\n } else {\n return emptyTileURL;\n }\n }\n \"\"\" % args # noqa\n\n elif self.options.profile == 'raster':\n s += \"\"\"\n function getURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.getServerResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.getServerZoom();\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (OpenLayers.Util.isArray(url)) {\n url = this.selectUrl(path, url);\n }\n if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {\n return url + path;\n } else {\n return emptyTileURL;\n }\n }\n \"\"\" % args # noqa\n\n s += \"\"\"\n function getWindowHeight() {\n if (self.innerHeight) return self.innerHeight;\n if (document.documentElement && document.documentElement.clientHeight)\n return document.documentElement.clientHeight;\n if (document.body) return document.body.clientHeight;\n return 0;\n }\n\n function getWindowWidth() {\n if (self.innerWidth) return self.innerWidth;\n if (document.documentElement && document.documentElement.clientWidth)\n return document.documentElement.clientWidth;\n if (document.body) return document.body.clientWidth;\n return 0;\n }\n\n function resize() {\n var map = document.getElementById(\"map\");\n var header = document.getElementById(\"header\");\n var subheader = document.getElementById(\"subheader\");\n map.style.height = (getWindowHeight()-80) + \"px\";\n map.style.width = (getWindowWidth()-20) + \"px\";\n header.style.width = (getWindowWidth()-20) + \"px\";\n subheader.style.width = (getWindowWidth()-20) + \"px\";\n if (map.updateSize) { map.updateSize(); };\n }\n\n onresize=function(){ resize(); };\n\n </script>\n </head>\n <body onload=\"init()\">\n <div id=\"header\"><h1>%(title)s</h1></div>\n <div id=\"subheader\">Generated by <a href=\"http://www.klokan.cz/projects/gdal2tiles/\">GDAL2Tiles</a>, Copyright &copy; 2008 <a href=\"http://www.klokan.cz/\">Klokan Petr Pridal</a>, <a href=\"http://www.gdal.org/\">GDAL</a> &amp; <a href=\"http://www.osgeo.org/\">OSGeo</a> <a href=\"http://code.google.com/soc/\">GSoC</a>\n <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. 
THANK YOU -->\n </div>\n <div id=\"map\"></div>\n <script type=\"text/javascript\" >resize()</script>\n </body>\n </html>\"\"\" % args # noqa\n\n return s", "def initialize_observation_files(tile_file_list):\n n_tiles = len(tile_file_list)\n if(n_tiles>0):\n for tile_file in tile_file_list:\n target_tile_pack = util.TargetTile(tile_file) \n target_tile_pack.write_results_to_file(tile_file)\n return", "def create_map(\n datapointsPath: Union[Path, str],\n linksPath: Union[Path, str],\n datapointAttrPath: Union[Path, str],\n node_attr_map: Dict[str, str],\n link_attr_map: Dict[str, str],\n snapshots: List[Dict] = [],\n playerSettings: Dict[str, Any] = {},\n outFolder: Union[Path, str] = \"data_out\",\n):\n\n # create folders and copy the index file\n print(f\">> creating folders\")\n out_dir = Path(outFolder)\n out_data_path = out_dir / \"data\"\n if not out_data_path.exists():\n print(f\"\\t- new folder - {out_data_path}\")\n out_data_path.mkdir(parents=True, exist_ok=True)\n else:\n print(f\"\\t- found existing. overwriting - {out_data_path}\")\n\n # copy the index and run scripts to out directory\n shutil.copy(\"src/index.html\", out_dir)\n print(f\"\\t- copied {out_dir}/index.html\")\n\n shutil.copy(\"src/run_local.sh\", out_dir)\n print(f\"\\t- copied {out_dir}/run_local.sh\\n\")\n\n # write the files\n print(f\">> building dataset\")\n __write_dataset_file(datapointsPath, datapointAttrPath, out_data_path)\n print(f\"\\t- new dataset file written to {out_data_path / 'nodes.json'}.\\n\")\n\n print(f\">> building network\")\n __write_network_file(datapointsPath, linksPath, node_attr_map, link_attr_map, out_data_path)\n print(f\"\\t- new network file written to {out_data_path / 'links.json'}.\\n\")\n\n print(f\">> building settings\")\n __write_settings_file(snapshots, playerSettings, out_data_path)\n print(f\"\\t- new settings file written to {out_data_path / 'settings.json'}.\\n\")", "def export_data(args, links_locations_and_timestamps, gps_coordinates):\n\n json_dump = []\n errors = []\n\n os.makedirs(\"output/\" + args.target_account, exist_ok=True)\n\n for i in range(0, len(links_locations_and_timestamps)):\n links_locations_and_timestamps[i].append(gps_coordinates[i])\n if gps_coordinates[i] != \"Error\":\n json_dump.append({\n \"link\": links_locations_and_timestamps[i][0],\n \"place\": links_locations_and_timestamps[i][1],\n \"timestamp\": links_locations_and_timestamps[i][2],\n \"gps\": {\n \"lat\": links_locations_and_timestamps[i][3][0],\n \"lon\": links_locations_and_timestamps[i][3][1],\n },\n })\n else:\n errors.append(({\n \"link\": links_locations_and_timestamps[i][0],\n \"place\": links_locations_and_timestamps[i][1],\n \"timestamp\": links_locations_and_timestamps[i][2],\n \"gps\": \"Error\",\n }))\n with open(\n \"output/\" + args.target_account + \"/\" + args.target_account +\n \"_instaloctrack_data.json\", \"w\") as filehandle:\n json.dump(json_dump, filehandle)\n\n with open(\n \"output/\" + args.target_account + \"/\" + args.target_account +\n \"_instaloctrack_errors.json\", \"w\") as filehandle:\n json.dump(errors, filehandle)\n print(Fore.WHITE +\n \"Location names, timestamps, and GPS Coordinates were written to :\" +\n Fore.GREEN + \" output/\" + args.target_account + \"/\" +\n args.target_account + \"_instaloctrack_data.json\")\n\n return len(json_dump), len(errors)", "def save_coords(self, coords, output_dir):\n new_rows = []\n for i, (lat, lon) in enumerate(coords):\n row = {\n 'tile_id': i,\n 'lat':lat,\n 'long':lon,\n 'side_length': self.side_len \n }\n\n 
new_rows.append(row)\n\n coord_df = pd.DataFrame(new_rows)\n coord_df.to_csv(f\"{output_dir}/coordinate_map.csv\", index=False)\n print(\"done saving coordinates!\")", "def main():\n\tparser = construct_parser()\n\targs = parser.parse_args()\n\ttiles = slice(args.image, args.num_tiles, save=False)\n\tsave_tiles(tiles, prefix=get_basename(args.image), directory=args.dir,\n\t\t format=args.format)", "def ard_tile_export_generator(study_area_path, wrs2_coll, cell_size=30,\n wrs2_tile_list=[], wrs2_tile_field='WRS2_TILE',\n wrs2_buffer=0, n_max=1000, simplify_buffer=240):\n\n # Hard code parameters for ARD grid\n snap_x, snap_y = 15, 15\n tile_cells = 5000\n output_geo = (30, 0, -2565585, 0, -30, 3314805)\n # Based on WELD and similar/identical? to LANDFIRE but using WGS84\n # https://landsat.usgs.gov/sites/default/files/documents/LSDS-1873_US_Landsat_ARD_DFCB.pdf\n output_osr = osr.SpatialReference()\n output_osr.ImportFromProj4(\n '+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23.0 +lon_0=-96 '\n '+x_0=0 +y_0=0 +ellps=GRS80 +datum=WGS84 +units=m +no_defs')\n output_crs = str(output_osr.ExportToWkt())\n logging.debug('\\n {:16s} {}'.format('Output crs:', output_crs))\n\n\n logging.info(' Reading study area shapefile')\n logging.info(' {}'.format(study_area_path))\n study_area_ds = ogr.Open(study_area_path, 0)\n study_area_lyr = study_area_ds.GetLayer()\n study_area_osr = study_area_lyr.GetSpatialRef()\n study_area_crs = str(study_area_osr.ExportToWkt())\n # study_area_proj4 = study_area_osr.ExportToProj4()\n logging.debug(' Study area projection: {}'.format(study_area_crs))\n\n # Get the dissolved/unioned geometry of the study area\n output_geom = ogr.Geometry(ogr.wkbMultiPolygon)\n for study_area_ftr in study_area_lyr:\n output_geom = output_geom.Union(study_area_ftr.GetGeometryRef())\n study_area_ds = None\n\n # Project the study area geometry to the output coordinate system\n output_tx = osr.CoordinateTransformation(study_area_osr, output_osr)\n output_geom.Transform(output_tx)\n\n # # Get the output extent from the projected geometry\n # output_extent = list(output_geom.GetEnvelope())\n # # OGR extents are swapped from GDAL extents\n # output_extent[1], output_extent[2] = output_extent[2], output_extent[1]\n # logging.debug(' {:16s} {}'.format('Output Extent:', output_extent))\n\n # Compute tile size (in meters)\n tile_size = float(tile_cells) * cell_size\n\n # # Expand extent to fully include tiles\n # output_extent[0] = math.floor(\n # (output_extent[0] - snap_x) / tile_size) * tile_size + snap_x\n # output_extent[1] = math.floor(\n # (output_extent[1] - snap_y) / tile_size) * tile_size + snap_y\n # output_extent[2] = math.ceil(\n # (output_extent[2] - snap_x) / tile_size) * tile_size + snap_x\n # output_extent[3] = math.ceil(\n # (output_extent[3] - snap_y) / tile_size) * tile_size + snap_y\n # logging.debug(' {:16s} {}'.format('Adjusted Extent:', output_extent))\n\n # Create simplified geometries to speed up checking tile intersections\n output_hull = output_geom.ConvexHull()\n\n # Buffer/simplify values are assuming the geometry units are in meters\n output_simplify = output_geom.Buffer(simplify_buffer) \\\n .SimplifyPreserveTopology(simplify_buffer)\n\n # Generate an EE feature\n output_ee_geom = ee.Geometry(\n json.loads(output_simplify.ExportToJson()), output_crs, False)\n\n\n # ARD tile collection\n tiles_coll = ee.FeatureCollection('projects/eeflux/conus_ard_grid') \\\n .filterMetadata('conus', 'equals', 1) \\\n .filterBounds(output_ee_geom)\n # .filter('active', 'equals', 1)\n 
index_list = tiles_coll.aggregate_histogram('index').getInfo().keys()\n export_list = []\n for index in index_list:\n # logging.debug(' {}'.format(index))\n tile_h = int(index[1:4])\n tile_v = int(index[5:8])\n tile_geo = [\n cell_size, 0, output_geo[2] + tile_h * tile_size,\n 0, -cell_size, output_geo[5] - tile_v * tile_size]\n tile_extent = [\n tile_geo[2], tile_geo[5] - tile_size,\n tile_geo[2] + tile_size, tile_geo[5]]\n export_list.append({\n 'crs': output_crs,\n 'extent': tile_extent,\n 'geo': tile_geo,\n 'index': index,\n 'maxpixels': tile_cells * tile_cells + 1,\n 'shape': '{0}x{0}'.format(int(tile_cells)),\n })\n\n # Pre-filter the WRS2 descending collection\n # with the buffered tile geometry\n # Then buffer the WRS2 descending collection\n if wrs2_buffer:\n wrs2_coll = ee.FeatureCollection(wrs2_coll) \\\n .filterBounds(output_ee_geom.buffer(wrs2_buffer, 1)) \\\n .map(lambda ftr: ftr.buffer(wrs2_buffer, 1))\n else:\n wrs2_coll = ee.FeatureCollection(wrs2_coll) \\\n .filterBounds(output_ee_geom)\n\n # Apply the user defined WRS2 tile list\n if wrs2_tile_list:\n wrs2_coll = wrs2_coll.filter(ee.Filter.inList(\n 'WRS2_TILE', wrs2_tile_list))\n\n # Join intersecting geometries\n tiles_coll = ee.Join.saveAll(matchesKey='scenes').apply(\n tiles_coll, wrs2_coll,\n ee.Filter.intersects(leftField='.geo', rightField='.geo', maxError=10))\n\n def tile_scenes(tile):\n # Calling \".toList()\" allows the map to return the WRS2 tiles as a list\n scenes = ee.FeatureCollection(ee.List(ee.Feature(tile).get('scenes'))) \\\n .toList(n_max).map(lambda ftr: ee.Feature(ftr).get(wrs2_tile_field))\n return ee.Feature(None, {\n 'index': tile.get('index'),\n 'wrs2_tiles': scenes})\n tile_wrs2_info = ee.FeatureCollection(tiles_coll.map(tile_scenes)).getInfo()\n tile_wrs2_dict = {\n str(t['properties']['index']): map(str, t['properties']['wrs2_tiles'])\n for t in tile_wrs2_info['features']}\n\n # Pull the WRS2 tile list for each tile\n # Only yield exports that have intersecting WRS2 tiles\n for export_info in export_list:\n try:\n export_info['wrs2_tiles'] = sorted(\n tile_wrs2_dict[export_info['index']])\n yield export_info\n except KeyError:\n pass\n # t_index = export_info['index']\n # try:\n # # export_list[i]['wrs2_tiles'] = tile_pr_dict[t_index]\n # except KeyError:\n # # logging.debug(' Tile {} - no WRS2 tiles'.format(t_index))\n # # export_list[i]['wrs2_tiles'] = []", "def test_output_data():\n output_params = dict(\n type=\"geodetic\",\n format=\"GeoTIFF\",\n path=OUT_DIR,\n pixelbuffer=0,\n metatiling=1,\n bands=1,\n dtype=\"int16\"\n )\n output = gtiff.OutputData(output_params)\n assert output.path == OUT_DIR\n assert output.file_extension == \".tif\"\n tp = BufferedTilePyramid(\"geodetic\")\n tile = tp.tile(5, 5, 5)\n # get_path\n assert output.get_path(tile) == os.path.join(*[\n OUT_DIR, \"5\", \"5\", \"5\"+\".tif\"])\n # prepare_path\n try:\n temp_dir = os.path.join(*[OUT_DIR, \"5\", \"5\"])\n output.prepare_path(tile)\n assert os.path.isdir(temp_dir)\n finally:\n shutil.rmtree(temp_dir, ignore_errors=True)\n # profile\n assert isinstance(output.profile(tile), dict)\n # write\n try:\n tile.data = np.ones((1, ) + tile.shape)*128\n output.write(tile)\n # tiles_exist\n assert output.tiles_exist(tile)\n # read\n data = output.read(tile).data\n assert isinstance(data, np.ndarray)\n assert not data[0].mask.any()\n finally:\n shutil.rmtree(temp_dir, ignore_errors=True)\n # read empty\n data = output.read(tile).data\n assert isinstance(data, np.ndarray)\n assert data[0].mask.all()\n # empty\n empty = 
output.empty(tile)\n assert isinstance(empty, ma.MaskedArray)\n assert not empty.any()\n # deflate with predictor\n output_params.update(compression=\"deflate\", predictor=2)\n output = gtiff.OutputData(output_params)\n assert output.profile(tile)[\"compress\"] == \"deflate\"\n assert output.profile(tile)[\"predictor\"] == 2", "def render_tiles(output):\n chunks = [output[i:i + 3] for i in range(0, len(output), 3)]\n max_i = max_j = 0\n for i, j, _ in chunks:\n max_i, max_j = max(i, max_i), max(j, max_j)\n\n matrix = [[None] * (max_j + 1) for _ in range(max_i + 1)]\n\n for i, j, tile_id in chunks:\n matrix[i][j] = draw_tile(tile_id)\n\n for i, row in enumerate(matrix):\n matrix[i] = \" \".join(row)\n return matrix", "def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. \n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' 
)) # then navigate back up to the base directory", "def build_tiles(img,tilefile,tilesize,options=[]):\n\tlevels=ceil(log(max(img.get_xsize(),img.get_ysize())/tilesize)/log(2.0))\n\t\n\ttf=file(tilefile,\"w\")\n\t\n\ttile_dict={}\n\tpos=0\n\timg2=img.copy()\n\txs,ys=img2.get_xsize(),img2.get_ysize()\n\tfor l in range(int(levels)):\n\t\trmin=img2.get_attr(\"mean\")-img2.get_attr(\"sigma\")*3.0\n\t\trmax=img2.get_attr(\"mean\")+img2.get_attr(\"sigma\")*3.0\n\t\tfor x in range(0,img2.get_xsize(),tilesize):\n\t\t\tfor y in range(0,img2.get_ysize(),tilesize):\n\t\t\t\ti=img2.get_clip(Region(x,y,tilesize,tilesize))\n\t\t\t\ti.set_attr(\"render_min\",rmin)\n\t\t\t\ti.set_attr(\"render_max\",rmax)\n\t\t\t\ti.set_attr(\"jpeg_quality\",70)\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ti.write_image(fsp)\n\t\t\t\tsz=os.stat(fsp).st_size\n\t\t\t\ttile_dict[(l,x/tilesize,y/tilesize)]=(pos,sz)\n\t\t\t\tpos+=sz\n\t\timg2.process_inplace(\"math.meanshrink\",{\"n\":2})\n\t\n\t# This will produce 2 power spectrum images in the tile file\n\t# with scale factors -1 and -2\n\tif \"pspec\" in options :\n\t\tnx,ny=img.get_xsize()/512,img.get_ysize()/512\n\t\ta=EMData()\n\t\ta.set_size(512,512)\n\t\tif (ny>2 and nx>2) :\n\t\t\tfor y in range(1,ny-1):\n\t\t\t\tfor x in range(1,nx-1):\n\t\t\t\t\tc=img.get_clip(Region(x*512,y*512,512,512))\n\t\t\t\t\tc.process_inplace(\"normalize\")\n\t\t\t\t\tc.process_inplace(\"math.realtofft\")\n\t\t\t\t\tc.process_inplace(\"math.squared\")\n\t\t\t\t\ta+=c\n\t\t\ta.set_value_at(256,256,0,.01)\n\t\t\ta-=a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.01\n\t\t\ta.process_inplace(\"math.log\")\n\t\t\ta-=a.get_attr(\"minimum\")\n\t\t\ta.set_attr(\"render_min\",a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.1)\n\t\t\ta.set_attr(\"render_max\",a.get_attr(\"mean\")+a.get_attr(\"sigma\")*4.0)\n\t\t\ta.set_attr(\"jepg_quality\",80)\n\t\t\ta.write_image(\"/tmp/tmpimg.mrc\")\n\t\t\tfsp=\"tmpimg.jpg\"\n\t\t\ta.write_image(fsp)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-1,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\t\n#\t\ttry:\n\t\t\timport matplotlib\n\t\t\tmatplotlib.use('Agg')\n\t\t\timport pylab\n\t\t\tmanager = pylab.get_current_fig_manager()\n\t\t\tapix=options[\"pspec\"]\n\t\t\tdx=1.0/(2.0*apix*256.0)\n\t\t\tx=pylab.arange(dx,dx*255.9,dx)\n\t\t\ty=a.calc_radial_dist(255,1,1,0)\t# radial power spectrum (log)\n\t\t\tpylab.figure(figsize=(8,6),dpi=96)\n\t\t\tpylab.axes([.08,.08,.9,.9], axisbg='w')\n\t\t\tpylab.plot(x,y)\n\t\t\tpylab.axis([0,dx*256,min(y),max(y)])\n\t\t\tpylab.xlabel(\"Spatial Freq. 
(1/A)\")\n\t\t\tpylab.ylabel(\"Log Intensity (10^x)\")\n#\t\t\tprint y\n\t\t\t\n\t\t\tfsp=\"tmpimg2.png\"\n\t\t\tpylab.savefig(fsp,dpi=96)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-2,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\n#\t\texcept:\n#\t\t\tprint \"Unable to generate plot (need matplotlib)\"\n\t\t\t\n\t\n\tpickle.dump(tile_dict,tf)\n\t\n\tfor l in range(int(levels)):\n\t\tfor x in range(0,xs,tilesize):\n\t\t\tfor y in range(0,ys,tilesize):\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ta=file(fsp,\"r\")\n\t\t\t\tb=a.read()\n\t\t\t\ta.close()\n\t\t\t\ttf.write(b)\n\t\t\t\tos.remove(fsp)\n\t\txs/=2\n\t\tys/=2\n\t\n\tif \"pspec\" in options :\n\t\tfor fsp in [\"tmpimg.jpg\",\"tmpimg2.png\"] :\n\t\t\ta=file(fsp,\"r\")\n\t\t\tb=a.read()\n\t\t\ta.close()\n\t\t\ttf.write(b)\n#\t\t\tos.remove(fsp)\n\t\n\ttf.close()", "def get_building_tiles(building, btype=\"buildings\"):\n\n def read_building(f):\n output = []\n for line in f:\n line = line.strip()\n row = []\n for tile in line.split():\n row.append(int(tile))\n output.append(row)\n return output\n\n with open(os.path.join(os.path.dirname(__file__),\n \"%s/\" % btype,\n \"%s.tiles\" % building)) as building_file:\n\n output = read_building(building_file)\n\n hitmap_file = os.path.join(os.path.dirname(__file__),\n \"%s/\" % btype,\n \"%s.hitmap\" % building)\n if os.path.exists(hitmap_file):\n with open(hitmap_file) as hitmap_file:\n hitmap = read_building(hitmap_file)\n else:\n hitmap = [[0 for i in range(len(output[j]))] for\n j in range(len(output))]\n\n portal_file = os.path.join(os.path.dirname(__file__),\n \"%s/\" % btype,\n \"%s.portals\" % building)\n portals = []\n if os.path.exists(portal_file):\n with open(portal_file) as portal_file:\n for p in portal_file:\n x, y, width, height, destination, dest_crds = p.strip().split()\n dest_crds = map(float, dest_crds.split(\":\"))\n portals.append({\n \"x\": float(x),\n \"y\": float(y),\n \"width\": int(width),\n \"height\": int(height),\n \"destination\": destination,\n \"dest_coords\": dest_crds})\n\n return (len(output[0]), len(output), output, hitmap, portals, )", "def write_tiles_to_file(tiles, gfx_file, output_file=None):\n if output_file is None:\n output_file = gfx_file + '_edit'\n\n sorted_tiles = sorted(tiles, key=lambda tile: int(tile.address, 16))\n\n with open(gfx_file, 'rb') as gfx_reader:\n with open(output_file, 'wb') as f:\n for tile in sorted_tiles:\n converted_addr = convert_mame_addr(tile.address, tile.dimensions)\n read_length = converted_addr - gfx_reader.tell()\n if read_length == 128:\n gfx_reader.seek(read_length, 1)\n f.write(tile.data)\n else:\n unchanged_gfx = gfx_reader.read(read_length)\n f.write(unchanged_gfx)\n gfx_reader.seek(128, 1)\n f.write(tile.data)\n\n final_read = gfx_reader.read()\n f.write(final_read)", "def createTiles():\n Renderer.Clear()\n map = []\n w, h = len(testmap[0]), len(testmap)\n x, y = 0, 0\n for row in testmap:\n for char in row:\n map.append(makeTile(char, x, y))\n x += 1\n y += 1\n x = 0\n\n return map, w, h", "def renderMetaTile(z, x, y, ntiles, hypsoreliefMap, landcoverreliefMap, areasMap, oceanMap, contoursMap, featuresMap):\n hypsorelief = renderLayer('hypsorelief', z, x, y, ntiles, hypsoreliefMap, 'png')\n landcoverrelief = renderLayer('landcoverrelief', z, x, y, ntiles, landcoverreliefMap, 'png')\n areas = renderLayer('areas', z, x, y, ntiles, areasMap, 'png')\n ocean = renderLayer('ocean', z, x, y, ntiles, oceanMap, 'png', True)\n contours = renderLayer('contours', z, x, y, ntiles, contoursMap, 
'png', True)\n features = renderLayer('features', z, x, y, ntiles, featuresMap, 'png', True)\n base_h = getComposite((hypsorelief, areas, ocean))\n base_l = getComposite((landcoverrelief, ocean))\n composite_h = getComposite((base_h, contours, features))\n composite_l = getComposite((base_l, contours, features))\n saveTiles(z, x, y, ntiles, 'composite_h', composite_h)\n saveTiles(z, x, y, ntiles, 'composite_l', composite_l)\n if SAVE_JPEG_COMPOSITE:\n basename = 'jpeg' + str(JPEG_COMPOSITE_QUALITY)\n saveTiles(z, x, y, ntiles, basename+'_h', composite_h, 'jpg', basename)\n saveTiles(z, x, y, ntiles, basename+'_l', composite_l, 'jpg', basename)\n if SAVE_INTERMEDIATE_TILES:\n saveTiles(z, x, y, ntiles, 'base_h', base_h)\n saveTiles(z, x, y, ntiles, 'base_l', base_l)\n saveTiles(z, x, y, ntiles, 'contours', contours)\n saveTiles(z, x, y, ntiles, 'hypsorelief', hypsorelief)\n saveTiles(z, x, y, ntiles, 'landcoverrelief', landcoverrelief)\n saveTiles(z, x, y, ntiles, 'areas', areas)\n saveTiles(z, x, y, ntiles, 'ocean', ocean)\n saveTiles(z, x, y, ntiles, 'features', features)", "def download_tile(self, xtile, ytile):\n location = 'http://maps.six.nsw.gov.au/arcgis/rest/services/public/NSW_Imagery/MapServer/tile/'\n destination = 'downloaded_tiles/'\n save_name = str(self.zoom_level) + '_' + str(xtile) + '_' + str(ytile)\n tile_url = location + save_name.replace('_', '/')\n tile = requests.get(tile_url, stream=True)\n with open(destination + save_name + '.png', 'wb') as out_file:\n tile.raw.decode_content = True\n shutil.copyfileobj(tile.raw, out_file)\n tilepng = png.Reader(file=tile.raw)\n # shutil.copyfileobj(tilepng, out_file)\n del tile", "def write_overview_tile(self, tx, ty, tz,tms_osm):\n\n image_format = self.get_overview_tile_format(tx, ty, tz)\n\n if image_format is None:\n return\n else:\n num_bands = self.get_num_bands(image_format)\n\n dsquery = self.mem_drv.Create('', 2*self.tile_size, 2*self.tile_size, num_bands)\n self.fill_init_dest(dsquery)\n # tms: z=19: 281626\n # -z=18-140813 176168*2=352336; 176168*2+1=352337\n # -- 352336,352337\n y_from=2*ty\n y_to=2*ty + 1\n ty_tms=ty;\n s_y_type=\"tms\"\n if tms_osm:\n # osm: z=19: 281626\n # -z=18-140813 85975*2+1=171951; 85975*2=171950\n # -- 171951,171950 [in range: last/end not used]\n y_from=2*ty + 1\n y_to=2*ty\n ty_tms=(2**tz-1) - ty\n s_y_type=\"osm\"\n s_tile_id=\"{0}-{1}-{2}.{3}\".format(str(tz), str(tx),str(ty),s_y_type)\n if self.verbose:\n # Build from zoom 19 tiles: (281626, 171951) (281627, 171951) (281626, 171950) (281627, 171950)\n print \"\\tBuild [\",s_tile_id,\"] from [\",self.output_dir,\"] zoom\", tz+1,\" tiles [\",s_y_type,\"]: \", (2*tx, y_from), (2*tx+1, y_from),(2*tx, y_to), (2*tx+1, y_to)\n\n for cx, cy, child_image_format in self.iter_children(tx, ty, tz):\n if (ty_tms==0 and cy==1) or (ty_tms!=0 and (cy % (y_from)) != 0):\n tileposy = 0\n else:\n tileposy = self.tile_size\n if tx:\n tileposx = cx % (2*tx) * self.tile_size\n elif tx==0 and cx==1:\n tileposx = self.tile_size\n else:\n tileposx = 0\n\n path = self.get_full_path(cx, cy, tz+1, format_extension[child_image_format])\n\n dsquerytile = gdal.Open(path, gdal.GA_ReadOnly)\n\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n dsquerytile.ReadRaster(0, 0, self.tile_size, self.tile_size),\n band_list=range(1, dsquerytile.RasterCount+1))\n\n if image_format == \"PNG\" and dsquerytile.RasterCount != num_bands:\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n self.get_alpha_filler(), 
band_list=[num_bands])\n\n dstile = self.mem_drv.Create('', self.tile_size, self.tile_size, num_bands)\n path = self.get_full_path(tx, ty, tz, format_extension[image_format])\n self.resampler(path, dsquery, dstile, image_format)", "def generate_osm_export(\n export_type, username, id_string, export_id=None, options=None, xform=None\n):\n\n extension = options.get(\"extension\", export_type)\n\n if xform is None:\n xform = XForm.objects.get(user__username=username, id_string=id_string)\n\n kwargs = get_osm_data_kwargs(xform)\n osm_list = OsmData.objects.filter(**kwargs)\n content = get_combined_osm(osm_list)\n timestamp = datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")\n basename = f\"{id_string}_{timestamp}\"\n filename = basename + \".\" + extension\n file_path = os.path.join(username, \"exports\", id_string, export_type, filename)\n\n export_filename = write_temp_file_to_path(extension, content, file_path)\n\n export = get_or_create_export_object(export_id, options, xform, export_type)\n\n dir_name, basename = os.path.split(export_filename)\n export.filedir = dir_name\n export.filename = basename\n export.internal_status = Export.SUCCESSFUL\n export.save()\n\n return export", "def generate_overlay_images(self, overlay_name, x, y):\n for tile in range(1, self.config.number_of_tiles + 1):\n for time in self.config.time_points:\n for depth in self.config.depth_points:\n desc = self.config.tile_description_generator(tile, time, depth)\n file_name = self._generate_overlay_file_name(self.well, overlay_name, desc)\n self._generate_overlay(file_name, overlay_name + '-' + self.well + '-' + desc, x, y)", "def save_tiles(tiles, prefix=\"\", directory=os.getcwd(), format=\"png\"):\n for tile in tiles:\n tile.save(\n filename=tile.generate_filename(\n prefix=prefix, directory=directory, format=format\n ),\n format=format,\n )\n return tuple(tiles)", "def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': 
band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)", "def main(ini_path=None, overwrite_flag=False,\n tile_cols='', tile_rows='', delay=0):\n logging.info('\\nExport annual ET/ETrF/ETr/count image tiles')\n\n # Read config file\n ini = inputs.read(ini_path)\n inputs.parse_section(ini, section='INPUTS')\n inputs.parse_section(ini, section='INTERPOLATE')\n inputs.parse_section(ini, section='EXPORT')\n inputs.parse_section(ini, section=ini['INPUTS']['et_model'])\n\n if os.name == 'posix':\n shell_flag = False\n else:\n shell_flag = True\n\n # Limit tile ranges from command line\n # Eventually move to config file?\n try:\n tile_cols_list = list(utils.parse_int_set(tile_cols))\n except:\n tile_cols_list = []\n try:\n tile_rows_list = list(utils.parse_int_set(tile_rows))\n except:\n tile_rows_list = []\n\n logging.debug('\\nInitializing Earth Engine')\n ee.Initialize()\n\n # Get current running tasks\n tasks = utils.get_ee_tasks()\n\n # Get list of existing images/files\n if ini['EXPORT']['export_dest'] == 'ASSET':\n logging.debug('\\nGetting GEE asset list')\n asset_list = utils.get_ee_assets(\n ini['EXPORT']['output_ws'], shell_flag=shell_flag)\n logging.debug(asset_list)\n # elif ini['EXPORT']['export_dest'] == 'CLOUD':\n # logging.debug('\\nGetting cloud storage file list')\n # cloud_list = utils.get_bucket_files(\n # ini['EXPORT']['project_name'], ini['EXPORT']['output_ws'],\n # shell_flag=shell_flag)\n # # It may be necessary to remove image tile notation\n # elif ini['EXPORT']['export_dest'] == 'GDRIVE':\n # logging.debug('\\nGetting Google drive file list')\n # gdrive_list = [\n # os.path.join(ini['EXPORT']['output_ws'], x)\n # for x in os.listdir(ini['EXPORT']['output_ws'])]\n # # It may be necessary to remove image tile notation\n # # Very large tiles may get split up automatically by EE\n # # Strip the EE tile notation data from the image list\n # # gdrive_list = list(set([\n # # re.sub('-\\d{10}-\\d{10}.tif', '.tif', x)\n # # for x in os.listdir(ini['EXPORT']['output_ws'])]))\n # # logging.debug(gdrive_list)\n\n # Get list of tiles that intersect the study area\n logging.debug('\\nBuilding export list')\n export_list = list(ard_tile_export_generator(\n ini['INPUTS']['study_area_path'],\n wrs2_coll=ini['INPUTS']['wrs2_coll'],\n cell_size=ini['EXPORT']['cell_size'],\n wrs2_tile_list=ini['INPUTS']['wrs2_tiles'],\n wrs2_tile_field=ini['INPUTS']['wrs2_tile_field'],\n wrs2_buffer=ini['INPUTS']['wrs2_buffer']))\n if not export_list:\n logging.error('\\nEmpty export list, exiting')\n return False\n\n # Save export list to json\n with open('export_tiles.json', 'w') as json_f:\n 
json.dump(export_list, json_f)\n\n\n # Process each tile separately\n logging.info('\\nImage Exports')\n for export_n, export_info in enumerate(export_list):\n tile_col = int(export_info['index'][1:4])\n tile_row = int(export_info['index'][5:8])\n if tile_cols_list and int(tile_col) not in tile_cols_list:\n logging.debug('ARD Tile: {} ({}/{}), skipping'.format(\n export_info['index'], export_n + 1, len(export_list)))\n continue\n elif tile_rows_list and int(tile_row) not in tile_rows_list:\n logging.debug('ARD Tile: {} ({}/{}), skipping'.format(\n export_info['index'], export_n + 1, len(export_list)))\n continue\n else:\n logging.info('ARD Tile: {} ({}/{})'.format(\n export_info['index'], export_n + 1, len(export_list)))\n\n logging.debug(' Shape: {}'.format(export_info['shape']))\n logging.debug(' Transform: {}'.format(export_info['geo']))\n logging.debug(' Extent: {}'.format(export_info['extent']))\n logging.debug(' MaxPixels: {}'.format(export_info['maxpixels']))\n logging.debug(' WRS2 tiles: {}'.format(\n ', '.join(export_info['wrs2_tiles'])))\n\n\n if ini['INPUTS']['et_model'] == 'EEFLUX':\n # Get the Landsat collection\n landsat_coll = landsat.get_landsat_coll(\n wrs2_tile_list=export_info['wrs2_tiles'],\n cloud_cover=ini['INPUTS']['cloud_cover'],\n start_date=ini['INTERPOLATE']['start_date'],\n end_date=ini['INTERPOLATE']['end_date'],\n landsat5_flag=ini['INPUTS']['landsat5_flag'],\n landsat7_flag=ini['INPUTS']['landsat7_flag'],\n landsat8_flag=ini['INPUTS']['landsat8_flag'],\n landsat_type='RAD')\n\n # Compute ETf for each Landsat scene\n # The 'BQA' band is also being returned by the etrf method\n def apply_et_fraction(image):\n etrf_obj = eeflux.EEFlux(ee.Image(image)).etrf\n etrf_img = ee.Image(etrf_obj.select(['etrf'], ['etf'])) \\\n .clamp(-1, 2)\n cloud_mask = landsat.landsat_bqa_cloud_mask_func(\n ee.Image(etrf_obj. 
select(['BQA'])))\n return etrf_img.updateMask(cloud_mask) \\\n .copyProperties(image, ['system:time_start'])\n scene_et_fraction_coll = ee.ImageCollection(\n landsat_coll.map(apply_et_fraction))\n\n else:\n logging.error('\\nInvalid/unsupported ET Model: {}'.format(\n ini['INPUTS']['et_model']))\n return False\n\n\n # Daily reference ET collection\n # Is the \"refet_source\" a function of the model, interpolation, or other?\n # The \"refet_type\" parameter is currently being ignored\n if ini[ini['INPUTS']['et_model']]['refet_source'] == 'GRIDMET':\n daily_et_reference_coll = ee.ImageCollection('IDAHO_EPSCOR/GRIDMET') \\\n .filterDate(ini['INPUTS']['start_date'], ini['INPUTS']['end_date']) \\\n .select(['etr'], ['et_reference'])\n elif ini[ini['INPUTS']['et_model']]['refet_source'] == 'CIMIS':\n daily_et_reference_coll = ee.ImageCollection('projects/climate-engine/cimis/daily') \\\n .filterDate(ini['INPUTS']['start_date'],\n ini['INPUTS']['end_date']) \\\n .select(['etr_asce'], ['et_reference'])\n\n # Compute composite/mosaic images for each image date\n daily_et_fraction_coll = ee.ImageCollection(interpolate.aggregate_daily(\n image_coll=scene_et_fraction_coll,\n start_date=ini['INTERPOLATE']['start_date'],\n end_date=ini['INTERPOLATE']['end_date']))\n\n # Interpolate daily ETf, multiply by daily ETr, and sum to ET\n daily_et_actual_coll = ee.ImageCollection(interpolate.interp_et_coll(\n et_reference_coll=daily_et_reference_coll,\n et_fraction_coll=daily_et_fraction_coll,\n interp_days=ini['INTERPOLATE']['interp_days'],\n interp_type=ini['INTERPOLATE']['interp_type']))\n\n # Export products\n # for product in ini['EXPORT']['products']:\n\n # logging.debug('\\n Product: {}'.format(product))\n export_id = ini['EXPORT']['export_id_fmt'].format(\n model=ini['INPUTS']['et_model'].lower(),\n # product=product.lower(),\n study_area=ini['INPUTS']['study_area_name'],\n index=export_info['index'],\n start=ini['INPUTS']['start_date'],\n end=ini['INPUTS']['end_date'],\n export=ini['EXPORT']['export_dest'].lower())\n export_id = export_id.replace('-', '')\n logging.debug(' Export ID: {}'.format(export_id))\n\n # if product == 'scene_id':\n # # Export the scene list CSV to Google Drive\n # if ini['EXPORT']['export_dest'] == 'GDRIVE':\n # export_path = os.path.join(\n # ini['EXPORT']['output_ws'], export_id + '.csv')\n # elif ini['EXPORT']['export_dest'] == 'CLOUD':\n # export_path = '{}/{}/{}'.format(\n # ini['EXPORT']['output_ws'], product, export_id + '.csv')\n # if ini['EXPORT']['export_dest'] == 'CLOUD':\n # # Write each product to a separate folder\n # export_path = '{}/{}/{}'.format(\n # ini['EXPORT']['output_ws'], product, export_id + '.tif')\n # elif ini['EXPORT']['export_dest'] == 'GDRIVE':\n # export_path = os.path.join(\n # ini['EXPORT']['output_ws'], export_id + '.tif')\n if ini['EXPORT']['export_dest'] == 'ASSET':\n # Write each product to a separate folder\n export_path = '{}/{}'.format(\n ini['EXPORT']['output_ws'], export_id)\n else:\n logging.warning(' Unsupported product type, skipping')\n continue\n logging.debug(' Export folder: {}'.format(\n os.path.dirname(export_path)))\n logging.debug(' Export file: {}'.format(\n os.path.basename(export_path)))\n\n if overwrite_flag:\n if export_id in tasks.keys():\n logging.debug(' Task already submitted, cancelling')\n ee.data.cancelTask(tasks[export_id])\n\n # This is intentionally not an \"elif\" so that a task can be\n # cancelled and an existing image/file/asset can be removed\n if (ini['EXPORT']['export_dest'] == 'ASSET' and\n export_path 
in asset_list):\n logging.debug(' Asset already exists')\n subprocess.check_output(\n ['earthengine', 'rm', export_path],\n shell=shell_flag)\n # Files in cloud storage are easily overwritten\n # so it is unneccesary to manually remove them\n # # This would remove an existing file\n # subprocess.call(['gsutil', 'rm', export_path])\n # if (ini['EXPORT']['export_dest'] == 'CLOUD' and\n # export_path in cloud_list):\n # logging.debug(' Export image already exists')\n # # Files in cloud storage are easily overwritten\n # # so it is unneccesary to manually remove them\n # # # This would remove an existing file\n # # subprocess.check_output(['gsutil', 'rm', export_path])\n # elif (ini['EXPORT']['export_dest'] == 'GDRIVE' and\n # export_path in gdrive_list):\n # logging.debug(' Export image already exists, removing')\n # os.remove(export_path)\n # # Remove automatically generated image tiles\n # # for f in glob.glob(export_path.replace('.tif', '*.tif')):\n # # os.remove(f)\n else:\n if export_id in tasks.keys():\n logging.debug(' Task already submitted, skipping')\n continue\n if (ini['EXPORT']['export_dest'] == 'ASSET' and\n export_path in asset_list):\n logging.debug(' Asset already exists, skipping')\n continue\n # elif (ini['EXPORT']['export_dest'] == 'CLOUD' and\n # export_path in cloud_list):\n # logging.debug(' Export file already exists, skipping')\n # continue\n # elif (ini['EXPORT']['export_dest'] == 'GDRIVE' and\n # os.path.isfile(export_path)):\n # logging.debug(' Export file already exists, skipping')\n # continue\n\n # Compute target product\n # if product == 'scene_id':\n # def scene_id_extract(image):\n # return ee.Feature(None).setMulti({\n # 'SCENE_ID': ee.String(image.get('SCENE_ID'))})\n # scene_id_coll = ee.FeatureCollection(\n # scene_et_fraction_coll.map(scene_id_extract)).sort('SCENE_ID')\n\n output_images = []\n for product_i, product in enumerate(ini['EXPORT']['products']):\n logging.debug(' Product: {}'.format(product))\n if product == 'et_actual':\n # Sum daily ET to total ET\n output_images.append(\n ee.Image(daily_et_actual_coll.sum()).toFloat())\n elif product == 'et_reference':\n # Sum daily reference ET to total reference ET\n output_images.append(\n ee.Image(daily_et_reference_coll.sum()).toFloat())\n elif product == 'et_fraction':\n # Compute mean ETf (ET / ETr)\n output_images.append(\n ee.Image(daily_et_actual_coll.sum()) \\\n .divide(ee.Image(daily_et_reference_coll.sum())).toFloat())\n elif product == 'count':\n # Filter count date range to same period as reference ET\n output_images.append(ee.Image(\n daily_et_fraction_coll.filterDate(\n ini['INPUTS']['start_dt'],\n ini['INPUTS']['end_dt'] + datetime.timedelta(days=1)).count())\\\n .toUint8())\n\n # DEADEEF - Consider saving other input parameters\n # CLOUD_COVER_LAND, number of interpolation days, ?\n output_image = ee.Image(ee.Image(output_images) \\\n .rename(ini['EXPORT']['products']) \\\n .setMulti({\n 'system:time_start': ini['INPUTS']['start_date'],\n 'index': export_info['index']}))\n # print(output_image.get('system:time_start').getInfo())\n # input('ENTER')\n\n # Build export tasks\n # if product == 'scene_id':\n # if ini['EXPORT']['export_dest'] == 'CLOUD':\n # task = ee.batch.Export.table.toCloudStorage(\n # scene_id_coll,\n # description=export_id,\n # bucket=ini['EXPORT']['bucket_name'],\n # fileNamePrefix='{}/{}/{}'.format(\n # ini['EXPORT']['bucket_folder'], product, export_id),\n # fileFormat='CSV')\n # elif ini['EXPORT']['export_dest'] == 'GDRIVE':\n # # Export the scene list CSV to Google 
Drive\n # task = ee.batch.Export.table.toDrive(\n # scene_id_coll,\n # description=export_id,\n # folder=os.path.basename(ini['EXPORT']['output_ws']),\n # fileNamePrefix=export_id,\n # fileFormat='CSV')\n # elif ini['EXPORT']['export_dest'] == 'CLOUD':\n # # Export the image to cloud storage\n # task = ee.batch.Export.image.toCloudStorage(\n # output_image,\n # description=export_id,\n # bucket=ini['EXPORT']['bucket_name'],\n # fileNamePrefix='{}/{}/{}'.format(\n # ini['EXPORT']['bucket_folder'], product, export_id),\n # dimensions=export_info['shape'],\n # crs=export_info['crs'],\n # crsTransform=export_info['geo'],\n # # shardSize=,\n # # fileDimensions=,\n # maxPixels=export_info['maxpixels'])\n # elif ini['EXPORT']['export_dest'] == 'GDRIVE':\n # # Export the images to your Google Drive\n # task = ee.batch.Export.image.toDrive(\n # output_image,\n # description=export_id,\n # folder=os.path.basename(ini['EXPORT']['output_ws']),\n # fileNamePrefix=export_id,\n # dimensions=export_info['shape'],\n # crs=export_info['crs'],\n # crsTransform=export_info['geo'],\n # maxPixels=export_info['maxpixels'])\n if ini['EXPORT']['export_dest'] == 'ASSET':\n # Export the image to cloud storage\n task = ee.batch.Export.image.toAsset(\n output_image,\n description=export_id,\n assetId='{}/{}'.format(ini['EXPORT']['output_ws'], export_id),\n # pyramidingPolicy='mean',\n dimensions=export_info['shape'],\n crs=export_info['crs'],\n crsTransform=export_info['geo'],\n maxPixels=export_info['maxpixels'])\n else:\n logging.debug(' Export task not built, skipping')\n # continue\n\n # Try to start the export task a few times\n logging.debug(' Starting export task')\n for i in range(1, 10):\n try:\n task.start()\n break\n except Exception as e:\n logging.error(\n ' Error: {}\\n Retrying ({}/10)'.format(e, i))\n time.sleep(i ** 2)\n i += 1\n # logging.debug(' Active: {}'.format(task.active()))\n # logging.debug(' Status: {}'.format(task.status()))\n\n if delay and delay > 0:\n time.sleep(delay)\n elif delay and delay == -1:\n input('ENTER')", "def populate_tiles(self):\n\n # grid format :\n # grid(x,y,z)[0]: A valid WorldTile type (i.e. 
WorldTile.door)\n # grid(x,y,z)[1]: A list of ASCII color or format codes for ColorIze\n # grid(x,y,z)[2]: The tile object\n\n self.t_count = 0 # Tile count, increment for each tile added\n self.build_start = time.clock()\n self.logger.info(\"[*] Starting world building script\")\n\n script_list = [\n self.build_boss_room,\n self.build_rooms,\n self.build_halls,\n self.build_doors,\n self.build_chests,\n self.build_traps,\n self.build_mobs,\n self.build_npcs\n ]\n for func in script_list:\n self.logger.debug(\"\\tRunning {}\".format(func.__name__))\n if not func():\n e_text = \"Build script failed : {}\".format(func.__name__)\n raise AssertionError(e_text)\n\n self.logger.info(\"[*] World building script completed\")\n self.logger.debug(\"\\tTiles Placed : {}\".format(self.t_count))\n build_time = time.clock()-self.build_start\n self.logger.debug(\"\\tTook {}s\".format(build_time))\n self.logger.debug(\"\\tTiles/s : {}\".format(t_count/build_time))", "def output_rasters_cloud(self, arr, outname):\n\n if self.config_dict['path_mode'] == 'aws':\n # later on deleted by s3_delete_local()\n # local_outpath = os.path.join(self.config_dict['temp_folder'], outname)\n local_outname = outname.split('/')[-1]\n local_outpath = os.path.join(self.temp_folder, local_outname)\n self.log.debug('local_outpath {}'.format(local_outpath))\n\n t0 = t_now()\n\n band1 = arr\n # write to a temp folder\n with rasterio.open(local_outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,\n count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:\n wrast.write(band1, indexes=1)\n\n # Buckets are not directories but you can treat them like they are\n # bucket_name = os.path.split(self.config_dict['out_root'])[0] # dev-et-data\n # bucket_prefix = os.path.split(self.config_dict['out_root'])[-1] # tile_modelrun1\n bucket_name = self.config_dict['out_root'].split('/')[0]\n bucket_prefix_list = self.config_dict['out_root'].split('/')[1:]\n print(bucket_prefix_list)\n bucket_prefix = '/'.join(bucket_prefix_list)\n print(\"bucket prefix =\", bucket_prefix)\n bucket_filepath = os.path.join(bucket_prefix, outname) # os.path.join(dev-et-data/tile_modelrun1, outname)\n\n # uploads to aws bucket with filepath\n self.s3_delete_local(local_file=local_outpath, bucket=bucket_name, bucket_filepath=bucket_filepath)\n t_total = t_now() - t0\n self.log.info(\"OUTPUT - TIME - {} - {}\".format(t_total, bucket_filepath))\n\n elif self.config_dict['path_mode'] == 'google':\n print('google path mode not yet implemented')\n sys.exit(0)\n\n else:\n print('PATH MODE in config is not set properly for the cloud implementation of output_Rasters')\n sys.exit(0)", "def generate_tiles(self):\n if self.children:\n for child in self.children:\n child.generate_tiles()\n print \"Generating tile for %s using child tiles\" % self.bbox\n self.generate_tile_from_child_tiles()\n else:\n print \"Generating tile for %s using source data\" % self.bbox\n self.generate_tile_from_source()", "def webtiles_from_geotiffs(\n self,\n geotiff_paths=None,\n update_ranges=True,\n overwrite=True):\n\n # We need min and max for each z-level to create tiles with a\n # consistent color palette to value mapping. 
Update the config with the\n # min and max values from each z-level calculated during geotiff\n # processing\n if update_ranges:\n self.update_ranges()\n\n logger.info(f'Beginning creation of {len(geotiff_paths)} web tiles')\n\n for geotiff_path in geotiff_paths:\n self.webtile_from_geotiff(geotiff_path, overwrite=overwrite)\n\n logger.info(f'Finished creating {len(geotiff_paths)} web tiles.')", "def output_rasters(self, arr, outdir, outname):\n\n outpath = os.path.join(outdir, outname)\n print('the outpath for file {} is {}'.format(outname, outpath))\n\n # get the geoinfo from sample tiff to output intermediate files\n ds = rasterio.open(self.geoproperties_file)\n band1 = arr\n with rasterio.open(outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,\n count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:\n wrast.write(band1, indexes=1)\n\n # TODO - Set an AWS Cloud flag in the config_dict file to activate this function or not...\n # delete files created locally and put in bucket\n # PathManager.s3_delete_local(from_file, bucket, prefix_no_slash)", "def build_tiles(cls):\n\n LOGGER.debug(\"Building tiles\")\n\n for tile_id in tiledata.TILE_DATA:\n if not Tile.tile_factory(tile_id):\n LOGGER.error(\"Could not construct tile with ID %d\", tile_id)\n sys.exit(1)", "def download(data_type, gs_aoi, main_dir):\n # Get URLs for tiles covered by a polygon:\n # ----------------------------------------\n tiles = get_tile_names(gs_aoi)\n print('Found {} products'.format(len(tiles['tile_names'])))\n\n # Make sure temporary folder for download exists:\n # -----------------------------------------------\n dwn_dir = join(main_dir, data_type)\n if not exists(dwn_dir):\n makedirs(dwn_dir)\n\n # Proceed to download:\n # --------------------\n if data_type == 'DTM':\n # DOWNLOAD DTM FILES & UNZIP:\n # ---------------------------\n print('\\nDownloading DTM files:')\n for num, name in enumerate(tiles['dtm_url']):\n print('{} of {}'.format(num+1, len(tiles['dtm_url'])))\n dwn_stat, file_name = download_file(name, dwn_dir)\n print('File {}.'.format(dwn_stat))\n extract_zip(join(dwn_dir, file_name))\n # Delete ZIP file after extraction\n remove(join(dwn_dir, file_name))\n \n # Finished downloading:\n # ---------------------\n out_msg = 'Finished downloading DTM files!'\n \n elif data_type == 'LAZ':\n # DOWNLOAD LAZ FILES:\n # -------------------\n print('\\nDownloading LAZ files:')\n for num, name in enumerate(tiles['laz_url']):\n print('{} of {}'.format(num+1, len(tiles['laz_url'])))\n dwn_stat, _ = download_file(name, dwn_dir)\n print('File {}.'.format(dwn_stat))\n \n # Finished downloading:\n # ---------------------\n out_msg = 'Finished downloading LAZ files!'\n \n else:\n dwn_dir = None\n out_msg = 'Unexpected data_type'\n \n # Output dictionary:\n # ------------------\n out = {'out_msg': out_msg,\n 'out_dir': dwn_dir}\n \n return out", "def get_test_generator(cf, logger):\n # set up as method to read source data over the whole space (except annotated tiles)\n # and produce generated tiles\n config_file = os.environ[CONFIG_ENV_VAR]\n config = load_config(config_file)\n\n generate_full_output = GENERATE_FULL in os.environ\n\n if generate_full_output:\n # specify all covered tiles\n logger.info(\"Producing output for full data\")\n num_subdirs = len(config[\"subdir_paths\"])\n batch_data = []\n if GENERATE_SUBDIR in os.environ:\n generate_subdirs = [int(os.environ[GENERATE_SUBDIR])]\n else:\n generate_subdirs = range(num_subdirs)\n for subdir_num in generate_subdirs:\n 
annot_map, _, _ = get_annot_map(config, subdir_num)\n logger.info(\n \"Subdir %d, %d covered tiles\" % (subdir_num, annot_map.sum())\n )\n valid_tiles = np.stack(np.where(annot_map > 0), axis=1)\n batch_data.extend([(subdir_num, t) for t in valid_tiles])\n\n else:\n # choose tiles to output. if preferred tiles are specified, try using each\n # preferred tiles if it is not yet defined, and choose random subdir/tiles for\n # remaining\n # first define maps for each subdir\n subdir_maps = {}\n for subdir_num in range(len(config[\"subdir_paths\"])):\n # generate tiles from unannotated regions\n annot_map, annot_header, annotation_scale = get_annot_map(\n config, subdir_num\n )\n\n completed_map = get_completed_map(\n config, subdir_num, annot_map.shape\n )\n in_progress_map = get_completed_map(\n config, subdir_num, annot_map.shape, find_in_progress=True\n )\n incomplete_map = annot_map - completed_map - in_progress_map\n subdir_maps[subdir_num] = (\n annot_map,\n completed_map,\n in_progress_map,\n incomplete_map,\n )\n\n # check each preferred tile in turn\n chosen_tiles_list = []\n num_generate = config[\"generate_number_tiles\"]\n specified_subdir_num = None\n if GENERATE_SUBDIR in os.environ:\n specified_subdir_num = int(os.environ[GENERATE_SUBDIR])\n\n for index_vals in get_tiles_of_interest(config):\n subdir_num = index_vals[0]\n if (\n specified_subdir_num is not None\n and subdir_num != specified_subdir_num\n ):\n # only choose preferred tiles with given subdir number if specified\n continue\n index_number = np.array(index_vals[1:])\n annot_map, completed_map, in_progress_map, _ = subdir_maps[\n subdir_num\n ]\n try:\n check_index(index_number, completed_map, annot_map)\n check_index(index_number, in_progress_map, annot_map)\n except RuntimeError as e:\n # piece is already annotated/in-progress, or invalid\n logger.info(\n \"Skipping preferred tile %s, reason %s\" % (index_vals, e)\n )\n continue\n # piece is valid\n chosen_tiles_list.append(index_vals)\n if len(chosen_tiles_list) == num_generate:\n break\n\n logger.info(\"Using preferred tiles: %s\" % (chosen_tiles_list,))\n\n # choose a number of random tiles for remaining\n for _ in range(num_generate - len(chosen_tiles_list)):\n if specified_subdir_num is None:\n # choose a random subdir\n subdir_num = np.random.randint(len(config[\"subdir_paths\"]))\n else:\n subdir_num = specified_subdir_num\n (\n annot_map,\n completed_map,\n in_progress_map,\n incomplete_map,\n ) = subdir_maps[subdir_num]\n\n logger.info(\n \"Choosing random tile from subdir %d, %d incomplete tiles out of %d\"\n % (subdir_num, incomplete_map.sum(), annot_map.sum())\n )\n\n # choose tile to cover\n incomplete_tiles = np.stack(np.where(incomplete_map > 0), axis=1)\n chosen_set_index = np.random.randint(incomplete_tiles.shape[0])\n chosen_tile_index = incomplete_tiles[chosen_set_index] # (3,)\n chosen_tiles_list.append([subdir_num] + chosen_tile_index.tolist())\n\n logger.info(\"Chosen tiles: %s\" % (chosen_tiles_list,))\n\n # todo: find sections for incomplete tiles? 
need to find how this is done with the automated\n # overlapping tile selection\n\n # # for now just choose an origin section from each tile\n # test_sections = [x * annotation_scale for x in chosen_tiles]\n # logger.info(\"Using %d incomplete sections\" % (len(test_sections),))\n\n # create data as a list of tuples, each with the subdir number and tile index\n batch_data = [(x[0], x[1:]) for x in chosen_tiles_list]\n\n batch_gen = {}\n batch_iterator = PatientBatchIterator(\n batch_data, cf, config, generate_full_output\n )\n batch_gen[\"test\"] = batch_iterator\n # batch_gen[\"test\"] = create_data_gen_pipeline(\n # test_sections,\n # cf=cf,\n # annotation_config=config,\n # is_training=False,\n # segments_defined=False,\n # )\n\n # find how many patches per instance\n # patch_size = batch_iterator.patch_size\n # patch_crop_coords_list = dutils.get_patch_crop_coords(\n # np.zeros(config[\"annotation_size\"]), patch_size, min_overlap=np.array(patch_size).min()\n # )\n\n # print(\"num patches %d\" % len(patch_crop_coords_list))\n # batch_gen[\"n_test\"] = len(patch_crop_coords_list) # test_sections)\n # batch_gen[\"n_test\"] = len(chosen_tiles)\n batch_gen[\"n_test\"] = min(len(batch_data), config[\"generate_number_tiles\"])\n\n # set up for full export if parameter defined in environ\n if generate_full_output:\n batch_gen[\"exporter\"] = BatchExporter(cf, config)\n batch_gen[\"repeat_test_output\"] = True\n return batch_gen", "def build_outputs(self, **inputs):\n print(\"Building all outputs, \", self.name)\n# invscale, _ = self.build_output('invscale', **inputs)\n# loc, _ = self.build_output('loc', invscale=invscale, **inputs)\n# samp, _ = self.build_output('main', invscale=invscale, loc=loc)\n self.build_output('invscale', **inputs)\n self.build_output('loc', **inputs)\n self.build_output('main', **inputs)", "def save_maps(self, output_dir='.', prefix='', prefix_sep='_',\n names=None):\n if prefix == '':\n prefix_sep = ''\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n names = names or list(self.maps.keys())\n maps = {k: self.get_map(k) for k in names}\n\n for imgtype, img in maps.items():\n filename = prefix + prefix_sep + imgtype + '.nii.gz'\n outpath = os.path.join(output_dir, filename)\n img.to_filename(outpath)", "def generate_images(generator_model, output_dir, epoch):\n test_image_stack = generator_model.predict(np.random.normal(size=(10, 100)))\n test_image_stack = (test_image_stack * 255)\n test_image_stack = np.squeeze(np.round(test_image_stack).astype(np.uint8))\n tiled_output = tile_images(test_image_stack)\n tiled_output = Image.fromarray(tiled_output)\n outfile = os.path.join(output_dir, 'epoch_{}.png'.format(epoch))\n tiled_output.save(outfile)", "def init_output(self):\n if \"full_generated_data_file\" not in self.annotation_config:\n raise RuntimeError(\n 'Field \"full_generated_data_file\" not defined in project config file'\n )\n filename = self.annotation_config[\"full_generated_data_file\"]\n\n if self.annotation_config[\"full_generate_format\"] == \"real_valued\":\n assert self.annotation_config[\"segmentation_method\"] == \"semantic\"\n\n if GENERATE_SUBDIR in os.environ:\n generate_subdirs = [int(os.environ[GENERATE_SUBDIR])]\n else:\n num_subdirs = len(self.annotation_config[\"subdir_paths\"])\n generate_subdirs = range(num_subdirs)\n\n # produce output for each subdir\n for subdir_num in generate_subdirs:\n # get dimensions of region\n annot_map, _, _ = get_annot_map(\n self.annotation_config, subdir_num\n ) # shape (tiles_x, tiles_y, 
tiles_z)\n\n if self.annotation_config[\"trim_generated_extent\"]:\n # define output extent based on populated tiles\n populated_tiles = np.stack(\n np.where(annot_map)\n ) # shape (3, num_tiles)\n extent_origin = np.min(populated_tiles, axis=1)\n extent_size = (\n np.max(populated_tiles, axis=1) - extent_origin + 1\n )\n self.extent_origins[subdir_num] = extent_origin\n annotation_extent = extent_size * self.tile_size\n cover_percentage = np.prod(extent_size) / np.prod(\n annot_map.shape\n )\n print(\n \"ext orig\",\n extent_origin,\n \"max\",\n np.max(populated_tiles, axis=1),\n \"size\",\n extent_size,\n )\n print(\n \"Producing output over extent %s (%.2f%% of full)\"\n % (annotation_extent, cover_percentage)\n )\n else:\n annotation_extent = np.array(annot_map.shape) * self.tile_size\n print(\n \"Producing output over full extent %s\"\n % (annotation_extent,)\n )\n\n output_full_path = os.path.join(\n self.annotation_config[\"project_folder\"],\n self.annotation_config[\"subdir_paths\"][subdir_num],\n filename,\n )\n\n if self.annotation_config[\"full_generate_format\"] == \"real_valued\":\n output_dtype = \"f\"\n # todo: allow multiple classes to be output. currently semantic segmentations are squashed into one class.\n num_classes = 1 # self.annotation_config[\"semantic_segmentation_classes\"]\n output_shape = [num_classes] + annotation_extent.tolist()\n else:\n output_dtype = \"i\"\n output_shape = annotation_extent.tolist()\n\n # initialise HDF5 file for output\n if os.path.exists(output_full_path):\n raise RuntimeError(\n \"Output file %s already exists\" % output_full_path\n )\n h5file = h5py.File(output_full_path, \"w\")\n print(\n \"creating array generated_data with shape\",\n output_shape,\n \"dtype\",\n output_dtype,\n )\n h5_dataset_name = self.annotation_config[\n \"source_hdf5_dataset_name\"\n ]\n h5_dataset = h5file.create_dataset(\n h5_dataset_name, shape=output_shape, dtype=output_dtype\n )\n self.output_datasets[subdir_num] = h5_dataset", "def cutTiles(tasksInfo, results, origLocation, destLocation, \\\n completedOnly, nAnswers = 0):\n tmpMosaic = destLocation+\"/tmpMosaic_n\"+str(nAnswers)+\"/\"\n createDir(tmpMosaic)\n\n #Setting info on images\n numberImages = 12\n tmpImg = []\n for i in range(numberImages):\n tmpImg.append(destLocation+\"/tmpImg_n\"+str(i+1).zfill(2)+\"/\")\n createDir(tmpImg[i])\n imgFile = []\n imgFile.append('2011352')\n imgFile.append('2011353')\n imgFile.append('2011355')\n imgFile.append('2011357')\n imgFile.append('2011358')\n imgFile.append('2011359')\n imgFile.append('2011360')\n imgFile.append('2011361')\n imgFile.append('2011362')\n imgFile.append('2011363')\n imgFile.append('2011364')\n imgFile.append('2011365')\n\n #Setting info on image type\n formatFile = \"GTiff\"\n driver = gdal.GetDriverByName(formatFile)\n\n #Open file containing geoinfo on best result and statistical info on all\n if completedOnly == 1:\n f = open(destLocation+'/bestInfo.txt','w')\n #~ fStat = open(destLocation+'/statInfoCompleted.txt','w')\n #~ else:\n #~ fStat = open(destLocation+'/statInfoAll_n'+str(nAnswers)+'.txt','w')\n\n fSelect = open(destLocation+'/selectedTile.txt','w')\n numberTasks = len(tasksInfo)\n print 'tasksInfo: ', len(tasksInfo)\n print 'results: ', len(results)\n for task in range(numberTasks):\n #Checking if the task has the mininum number of answers\n if (sum(results[task]) < nAnswers):\n #If it has not, lets go to the next task\n continue\n #Geting the selected day for each task\n taskId = tasksInfo[task]['taskId']\n definedArea = 
tasksInfo[task]['area']\n selectedTile = results[task].index(max(results[task]))\n if selectedTile == 0:\n selectedFile = '2011352'\n elif selectedTile == 1:\n selectedFile = '2011353'\n elif selectedTile == 2:\n selectedFile = '2011355'\n elif selectedTile == 3:\n selectedFile = '2011357'\n elif selectedTile == 4:\n selectedFile = '2011358'\n elif selectedTile == 5:\n selectedFile = '2011359'\n elif selectedTile == 6:\n selectedFile = '2011360'\n elif selectedTile == 7:\n selectedFile = '2011361'\n elif selectedTile == 8:\n selectedFile = '2011362'\n elif selectedTile == 9:\n selectedFile = '2011363'\n elif selectedTile == 10:\n selectedFile = '2011364'\n elif selectedTile == 11:\n selectedFile = '2011365'\n print taskId\n print selectedFile\n print definedArea\n fSelect.write(str(taskId)+\" \"+selectedFile+\"\\n\")\n #Printing bestInfo\n if completedOnly == 1:\n f.write(str(definedArea[0])+\" \"+ str(definedArea[1])+\" \"+\\\n str(definedArea[2])+\" \"+str(definedArea[3])+\"\\n\")\n cmd = \"gdal_translate -projwin \"+str(definedArea[0])+\" \"+ \\\n str(definedArea[3])+\" \"+str(definedArea[2])+\" \"+ \\\n str(definedArea[1])+\" \"+origLocation+selectedFile+\".tif \"+ \\\n tmpMosaic+str(taskId)+\".tif\"\n os.system(cmd)\n #Generating image cuts for all images\n for i in range(numberImages):\n cmd = \"gdal_translate -projwin \"+str(definedArea[0])+\" \"+ \\\n str(definedArea[3])+\" \"+str(definedArea[2])+\" \"+ \\\n str(definedArea[1])+\" \"+origLocation+imgFile[i]+\".tif \"+ \\\n tmpImg[i]+str(taskId)+\".tif\"\n os.system(cmd)\n #Changing filename based on the type of result (if all results or\n #completed only.\n if completedOnly == 0:\n if nAnswers == 0:\n fileMosaic = \"mosaicall\"\n else:\n fileMosaic = \"mosaicall\"+\"_n\"+str(nAnswers)\n elif completedOnly == 1:\n if nAnswers == 0:\n fileMosaic = \"mosaiccompleted\"\n else:\n fileMosaic = \"mosaiccompleted\"+\"_n\"+str(nAnswers)\n #Checking if the temporary tile folder is not empty\n if os.listdir(tmpMosaic) == []:\n print \"No output detected for desired parameter N = \" + str(nAnswers)\n #Removing temporary directories\n removeDir(tmpMosaic)\n #Returning error code\n resultCut = 1\n return resultCut\n #Merging the tiles into one mosaic\n cmd = \"gdal_merge.py -init '200 200 200' -o \"+destLocation+fileMosaic+\".tif \"+tmpMosaic+ \\\n \"*.tif\"\n os.system(cmd)\n #Copying file with timestamp\n now = datetime.datetime.now()\n timeCreation = now.strftime(\"%Y-%m-%d_%Hh%M\")\n shutil.copyfile(destLocation+fileMosaic+\".tif\", destLocation+ \\\n fileMosaic+\"_\"+timeCreation+\".tif\")\n #Close file containing geoinfo on best result\n if completedOnly == 1:\n f.close()\n #Close stat file\n #~ fStat.close()\n fSelect.close()\n #Removing temporary directories\n #~ removeDir(tmpMosaic)\n #~ for i in range(numberImg):\n #~ removeDir(tmpImg[i])\n #Final state\n resultCut = 0\n return resultCut", "def process(sources, output, force):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s [%(levelname)s] - %(message)s', datefmt=\"%H:%M:%S\")\n\n logging.getLogger('shapely.geos').setLevel(logging.WARNING)\n logging.getLogger('Fiona').setLevel(logging.WARNING)\n logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)\n requests.packages.urllib3.disable_warnings()\n # logging.getLogger('processing').setLevel(logging.DEBUG)\n\n catalog_features = []\n failures = []\n path_parts_to_skip = utils.get_path_parts(sources).index(\"sources\") + 1\n success = True\n for path in utils.get_files(sources):\n try:\n 
logging.info(\"Processing \" + path)\n pathparts = utils.get_path_parts(path)[path_parts_to_skip:]\n pathparts[-1] = pathparts[-1].replace('.json', '.geojson')\n\n outdir = os.path.join(output, *pathparts[:-1], pathparts[-1].replace('.geojson', ''))\n outfile = os.path.join(output, *pathparts)\n\n source = utils.read_json(path)\n urlfile = urlparse(source['url']).path.split('/')[-1]\n \n if not hasattr(adapters, source['filetype']):\n logging.error('Unknown filetype ' + source['filetype'])\n failures.append(path)\n continue\n \n read_existing = False\n if os.path.isfile(outfile):\n logging.info(\"Output file exists\")\n if os.path.getmtime(outfile) > os.path.getmtime(path):\n logging.info(\"Output file is up to date\")\n if not force:\n read_existing = True\n logging.warning('Skipping ' + path + ' since generated file exists. Use --force to regenerate.') \n else:\n logging.info(\"Output is outdated, {} < {}\".format(\n datetime.datetime.fromtimestamp(os.path.getmtime(outfile)),\n datetime.datetime.fromtimestamp(os.path.getmtime(path))))\n\n if read_existing:\n with open(outfile, \"rb\") as f:\n geojson = json.load(f)\n properties = geojson['properties']\n else:\n logging.info('Downloading ' + source['url'])\n \n try:\n fp = utils.download(source['url'])\n except IOError:\n logging.error('Failed to download ' + source['url'])\n failures.append(path)\n continue\n \n logging.info('Reading ' + urlfile)\n \n if 'filter' in source:\n filterer = BasicFilterer(source['filter'], source.get('filterOperator', 'and'))\n else:\n filterer = None\n \n try:\n geojson = getattr(adapters, source['filetype'])\\\n .read(fp, source['properties'],\n filterer=filterer,\n layer_name=source.get(\"layerName\", None),\n source_filename=source.get(\"filenameInZip\", None))\n except IOError as e:\n logging.error('Failed to read ' + urlfile + \" \" + str(e))\n failures.append(path)\n continue\n except zipfile.BadZipfile as e:\n logging.error('Unable to open zip file ' + source['url'])\n failures.append(path)\n continue\n finally:\n os.remove(fp.name)\n if(len(geojson['features'])) == 0:\n logging.error(\"Result contained no features for \" + path)\n continue\n excluded_keys = ['filetype', 'url', 'properties', 'filter', 'filenameInZip']\n properties = {k:v for k,v in list(source.items()) if k not in excluded_keys}\n properties['source_url'] = source['url']\n properties['feature_count'] = len(geojson['features'])\n logging.info(\"Generating demo point\")\n properties['demo'] = geoutils.get_demo_point(geojson)\n \n geojson['properties'] = properties\n \n utils.make_sure_path_exists(os.path.dirname(outfile))\n\n #cleanup existing generated files\n if os.path.exists(outdir):\n rmtree(outdir)\n filename_to_match, ext = os.path.splitext(pathparts[-1])\n output_file_dir = os.sep.join(utils.get_path_parts(outfile)[:-1])\n logging.info(\"looking for generated files to delete in \" + output_file_dir)\n for name in os.listdir(output_file_dir):\n base, ext = os.path.splitext(name)\n if base == filename_to_match:\n to_remove = os.path.join(output_file_dir, name)\n logging.info(\"Removing generated file \" + to_remove)\n os.remove(to_remove)\n\n utils.write_json(outfile, geojson)\n\n logging.info(\"Generating label points\")\n label_geojson = geoutils.get_label_points(geojson)\n label_path = outfile.replace('.geojson', '.labels.geojson')\n utils.write_json(label_path, label_geojson)\n\n logging.info('Done. 
Processed to ' + outfile)\n \n if not \"demo\" in properties:\n properties['demo'] = geoutils.get_demo_point(geojson)\n\n properties['path'] = \"/\".join(pathparts)\n catalog_entry = {\n 'type': 'Feature',\n 'properties': properties,\n 'geometry': geoutils.get_union(geojson)\n }\n catalog_features.append(catalog_entry)\n\n if not os.path.exists(outdir) or not os.path.exists(os.path.join(outdir, \"units.json\")):\n logging.info(\"Generated exploded GeoJSON to \" + outdir)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n # .json instead of .geojson, incase there is a unit named \"source\"\n utils.write_json(os.path.join(outdir, \"source.json\"), catalog_entry) \n units = []\n for feature in geojson['features']:\n feature_id = str(feature['properties']['id'])\n feature_id = feature_id.replace('/', '')\n feature_filename = os.path.join(outdir, feature_id + \".geojson\")\n utils.write_json(feature_filename, feature)\n units.append(feature['properties'])\n utils.write_json(os.path.join(outdir, \"units.json\"), units)\n else:\n logging.debug(\"exploded GeoJSON already exists, not generating\")\n\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Error processing file \" + path)\n failures.append(path)\n success = False\n\n catalog = {\n 'type': 'FeatureCollection',\n 'features': catalog_features\n }\n utils.write_json(os.path.join(output,'catalog.geojson'), catalog)\n\n if not success:\n logging.error(\"Failed sources: \" + \", \".join(failures))\n sys.exit(-1)", "def main(parameters):\n metadata = get_metadata(parameters)\n # pprint(metadata)\n image_api = NswSatelliteImages(parameters, metadata)\n print('Zoom level:', image_api.zoom_level,\n 'Resolution:', image_api.resolution,\n 'Scale:', image_api.scale)\n image_api.download_tile(xtile=39000, ytile=60000)", "def _download_tile_wrapper(args):\n return download_tile(*args)", "def tile(self, z, x, y_tms):\n logger.debug(_(\"Download tile %s\") % ((z, x, y_tms),))\n # Render each keyword in URL ({s}, {x}, {y}, {z}, {size} ... )\n size = self.tilesize\n s = self.tiles_subdomains[(x + y_tms) % len(self.tiles_subdomains)];\n y_osm = (2**int(z) - 1) - int(y_tms)\n try:\n url = self.tiles_url.format(**locals())\n except KeyError, e:\n raise DownloadError(_(\"Unknown keyword %s in URL\") % e)\n logger.debug(_(\"Retrieve tile at %s\") % url)\n r = DOWNLOAD_RETRIES\n sleeptime = 1\n while r > 0:\n try:\n request = urllib2.Request(url)\n for header, value in self.headers.items():\n request.add_header(header, value)\n stream = urllib2.urlopen(request)\n assert stream.getcode() == 200\n return stream.read()\n except (AssertionError, IOError), e:\n logger.debug(_(\"Download error, retry (%s left). 
(%s)\") % (r, e))\n r -= 1\n time.sleep(sleeptime)\n # progressivly sleep longer to wait for this tile\n if (sleeptime <= 10) and (r % 2 == 0):\n sleeptime += 1 # increase wait\n raise DownloadError(_(\"Cannot download URL %s\") % url)", "def test_generate_tiles(self):\n tile_list = utils.generate_tiles()\n self.assertEqual(len(tile_list), utils.TILE_COUNT)", "def generate_tile(self, tms_x, tms_y, tms_z, arguments):\n pass", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def get_tiles_from_server(self, variants, server):\n def request_and_crop(zoom, x, y):\n _x = int(math.floor(x))\n _y = int(math.floor(y))\n\n x_mod = 0.5 - (x - _x) #How does this desviates from 0.5\n y_mod = 0.5 - (y - _y) \n\n if x_mod > 0:\n x_start = _x - 1 #1 tile before\n start_xpixel = int(math.floor((1-x_mod)*256))\n else:\n x_start = _x\n start_xpixel = int(math.floor(-1*x_mod*256))\n if y_mod > 0:\n y_start = _y - 1 #1 tile before\n start_ypixel = int(math.floor((1-y_mod)*256))\n else:\n y_start = _y\n 
start_ypixel = int(math.floor(-1*y_mod*256))\n\n tile = np.zeros((256*2, 256*2, 3), dtype= 'uint8')\n for x in range(2):\n for y in range(2):\n url = 'http://localhost:8080/{}/{}/{}.png'.format(zoom, x_start + x, y_start + y)\n resp = urlopen(url)\n image = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n tile[256*y:256*(y+1), 256*x:256*(x+1),...] = image\n tile = tile[start_ypixel:start_ypixel+256,start_xpixel:start_xpixel+256]\n return tile\n tiles = []\n for _ in range(variants):\n zoom = random.randint(19,21)\n x, y = self.getXY(zoom) \n tile = request_and_crop(zoom, x, y)\n tile = cv2.resize(tile, (self.width, self.height))\n tiles.append(tile)\n tiles = np.stack(tiles)\n return tiles", "def generate_overview_tiles(self):\n\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"NO\")\n\n print \"Generating Overview Tiles:\"\n\n if self.options.profile == 'garmin': # no overview tiles for 'garmin'\n return\n # Usage of existing tiles: from 4 underlying tiles generate one as overview.\n\n tcount = 0\n zcount = 0\n for tz in range(self.tmaxz-1, self.tminz-1, -1):\n tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]\n tcount += (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n zcount+=1\n if self.options.resume:\n count_tiles=tcount\n zcount+=1\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n count_tiles += (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n i_count = self.tile_exists(0, 0, 0,1)\n if i_count == count_tiles:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; all-tiles [\",zcount,\"] zoom-levels with tiles[\",count_tiles,\"]\"\n return\n ti = 0\n\n # querysize = tilesize * 2\n\n for tz in range(self.tmaxz-1, self.tminz-1, -1):\n tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]\n i_x_column_count=((tmaxx-tminx)+1)\n i_y_column_count=((tmaxy-tminy)+1)\n if self.options.verbose:\n # tx in range(tminx, tmaxx+1) tminx[ 140798 ] tmaxx[ 140872 ] ; ((tmaxx-tmaxy)+1) x_tiles[ -35331 ]\n print \"\\ttz=[\",tz,\"] : tx in range(tminx, tmaxx+1) tminx[\",tminx,\"] tmaxx[\",tmaxx,\"] ; ((tmaxx-tminx)+1) x_tiles[\",i_x_column_count,\"]\"\n # ty_tms in range(tmaxy, tminy-1, -1) tmaxy[ 176204 ] tminy[ 176126 ] ; ((tmaxy-tminy)) y_tiles[ 78 ]\n print \"\\ttz=[\",tz,\"] :ty_tms in range(tmaxy, tminy-1, -1) tmaxy[\",tmaxy,\"] tminy[\",tminy,\"] ; ((tmaxy-tminy)) y_tiles[\",i_y_column_count,\"]\"\n if self.options.resume:\n i_count = self.tile_exists(0, 0, tz,2)\n print \"\\tTile generation skipped because of --??? ; x/y-tiles of z[\",tz,\"] x/y_tiles[\",tcount,\"] i_count[\",i_count,\"]\"\n if i_count == tcount:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; x/y-tiles of z[\",tz,\"] x/y_tiles[\",tcount,\"]\"\n break\n for tx in range(tminx, tmaxx+1):\n tmaxy_work=tmaxy\n if self.options.resume:\n i_count = self.tile_exists(tx, 0, tz,3)\n print \"\\tTile generation skipped because of --??? 
; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"] i_count[\",i_count,\"]\"\n if i_count == i_y_column_count:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n break\n else:\n if i_count > 0:\n # this assums the rows are compleate, which may NOT be true 18-140798-176204.jpg\n tmaxy_work-=i_count\n if self.options.verbose:\n print \"\\tTile generation skipped to tmaxy[\",tmaxy_work,\"] because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n for ty_tms in range(tmaxy_work, tminy-1, -1): #range(tminy, tmaxy+1):\n ty_osm=self.flip_y(tz,ty_tms)\n ty=ty_tms\n if self.options.tms_osm:\n ty=ty_osm\n if self.stopped:\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None\n break\n\n ti += 1\n\n if self.options.resume:\n exists = self.tile_exists(tx, ty, tz,0)\n if exists and self.options.verbose:\n print \"\\tTile generation skipped because of --resume\"\n else:\n exists = False\n\n if not exists:\n if self.options.verbose:\n print ti, '/', tcount, self.get_verbose_tile_name(tx, ty, tz)\n try:\n self.write_overview_tile(tx, ty, tz,self.options.tms_osm)\n except ImageOutputException, e:\n self.error(\"'%d/%d/%d': %s\" % (tz, tx, ty, e.message))\n\n if not self.options.verbose or self.is_subprocess:\n self.progressbar( ti / float(tcount) )\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None", "def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )", "def writer(output, output_name, output_data):\n\n kml = simplekml.Kml(name=output_name)\n for exif in output_data:\n if('Latitude' in exif.keys() and\n 'Latitude Reference' in exif.keys() and\n 'Longitude Reference' in exif.keys() and\n 'Longitude' in exif.keys()):\n\n if 'Original Date' in exif.keys():\n dt = exif['Original Date']\n else:\n dt = 'N/A'\n\n if exif['Latitude Reference'] == 'S':\n latitude = '-' + exif['Latitude']\n else:\n latitude = exif['Latitude']\n\n if exif['Longitude Reference'] == 'W':\n longitude = '-' + exif['Longitude']\n else:\n longitude = exif['Longitude']\n\n kml.newpoint(name=exif['Name'],\n description='Originally Created: ' + dt,\n coords=[(longitude, latitude)])\n else:\n pass\n kml.save(os.path.join(output, output_name))", "def xyz_from_grid(x,y,z, 
pnts_out):\n\tx_flt=x.flatten()\n\ty_flt=y.flatten()[::-1]\n\tz_flt=z.flatten()\n\n\tutil.check_output_dir(pnts_out)\n\tfout = open(pnts_out, 'w')\n\tfout.write(\"x,y,z\\n\")\n\n\tprint(\"Writing out %i xyz triples to %s\" %(len(z_flt),pnts_out))\n\tfor i in range(0, len(z_flt)):\n\t\tif not np.isnan(z_flt[i]):\n\t\t\tfout.write(\"%.6f,%.6f,%.2f\\n\" %(x_flt[i], y_flt[i], z_flt[i]))\n\n\tfout.close()", "def make_e3sm_to_cmip_maps(config, logger, mesh_short_name, creation_date,\n ntasks):\n\n link_dir = '../assembled_files/diagnostics/maps'\n\n try:\n os.makedirs(link_dir)\n except FileExistsError:\n pass\n\n src_scrip_filename = 'ocean.scrip.nc'\n cmip6_grid_res = config.get('files_for_e3sm', 'cmip6_grid_res')\n if cmip6_grid_res == '180x360':\n dst_scrip_filename = 'cmip6_180x360_scrip.20181001.nc'\n elif cmip6_grid_res == '720x1440':\n dst_scrip_filename = 'cmip6_720x1440_scrip.20181001.nc'\n else:\n raise ValueError(f'Unexpected cmip6_grid_res: {cmip6_grid_res}')\n\n parallel_executable = config.get('parallel', 'parallel_executable')\n # split the parallel executable into constituents in case it includes flags\n parallel_command = parallel_executable.split(' ')\n parallel_system = config.get('parallel', 'system')\n if parallel_system == 'slurm':\n parallel_command.extend(['-n', f'{ntasks}'])\n elif parallel_system == 'single_node':\n if ntasks > 1:\n parallel_command.extend(['-n', f'{ntasks}'])\n else:\n raise ValueError(f'Unexpected parallel system: {parallel_system}')\n parallel_command = ' '.join(parallel_command)\n\n map_methods = dict(aave='conserve', mono='fv2fv_flx', nco='nco')\n for suffix, map_method in map_methods.items():\n local_map_filename = f'map_mpas_to_cmip6_{suffix}.nc'\n args = ['ncremap', f'--mpi_pfx={parallel_command}',\n f'--alg_typ={map_method}',\n f'--grd_src={src_scrip_filename}',\n f'--grd_dst={dst_scrip_filename}',\n f'--map={local_map_filename}']\n check_call(args, logger=logger)\n\n map_filename = \\\n f'map_{mesh_short_name}_to_cmip6_{cmip6_grid_res}_{suffix}.{creation_date}.nc' # noqa: E501\n\n symlink(os.path.abspath(local_map_filename),\n f'{link_dir}/{map_filename}')", "def test_write_tiles_3D(self):\n\n img = fixtures.skimage.data.astronaut()\n\n image_height, image_width, num_comps = img.shape\n\n tile_height, tile_width = 256, 256\n\n comp_prec = 8\n irreversible = False\n\n cblockh_init, cblockw_init = 64, 64\n\n numresolution = 6\n\n cparams = openjp2.set_default_encoder_parameters()\n\n outfile = str(self.temp_j2k_filename).encode()\n num_pad_bytes = openjp2.PATH_LEN - len(outfile)\n outfile += b'0' * num_pad_bytes\n cparams.outfile = outfile\n\n # not from openjpeg test file\n cparams.cp_disto_alloc = 1\n\n cparams.tile_size_on = openjp2.TRUE\n cparams.cp_tdx = tile_width\n cparams.cp_tdy = tile_height\n\n cparams.cblockw_init, cparams.cblockh_init = cblockw_init, cblockh_init\n\n # not from openjpeg test file\n cparams.mode = 0\n\n cparams.irreversible = 1 if irreversible else 0\n\n cparams.numresolution = numresolution\n cparams.prog_order = glymur.core.PROGRESSION_ORDER['LRCP']\n\n cparams.tcp_mct = 1\n\n cparams.tcp_numlayers = 1\n cparams.tcp_rates[0] = 0\n cparams.tcp_distoratio[0] = 0\n\n # comptparms == l_params\n comptparms = (openjp2.ImageComptParmType * num_comps)()\n for j in range(num_comps):\n comptparms[j].dx = 1\n comptparms[j].dy = 1\n comptparms[j].w = image_width\n comptparms[j].h = image_height\n comptparms[j].x0 = 0\n comptparms[j].y0 = 0\n comptparms[j].prec = comp_prec\n comptparms[j].bpp = comp_prec\n comptparms[j].sgnd = 
0\n\n with ExitStack() as stack:\n codec = openjp2.create_compress(openjp2.CODEC_J2K)\n stack.callback(openjp2.destroy_codec, codec)\n\n info_handler = _INFO_CALLBACK\n\n openjp2.set_info_handler(codec, info_handler)\n openjp2.set_warning_handler(codec, _WARNING_CALLBACK)\n openjp2.set_error_handler(codec, _ERROR_CALLBACK)\n\n image = openjp2.image_tile_create(comptparms, openjp2.CLRSPC_SRGB)\n stack.callback(openjp2.image_destroy, image)\n\n image.contents.x0, image.contents.y0 = 0, 0\n image.contents.x1, image.contents.y1 = image_width, image_height\n image.contents.color_space = openjp2.CLRSPC_SRGB\n\n openjp2.setup_encoder(codec, cparams, image)\n\n filename = str(self.temp_j2k_filename)\n strm = openjp2.stream_create_default_file_stream(filename, False)\n stack.callback(openjp2.stream_destroy, strm)\n\n openjp2.start_compress(codec, image, strm)\n\n # have to change the memory layout of 3D images in order to use\n # opj_write_tile\n openjp2.write_tile(\n codec, 0, _set_planar_pixel_order(img[0:256, 0:256, :]), strm\n )\n openjp2.write_tile(\n codec, 1, _set_planar_pixel_order(img[0:256, 256:512, :]), strm\n )\n openjp2.write_tile(\n codec, 2, _set_planar_pixel_order(img[256:512, 0:256, :]), strm\n )\n openjp2.write_tile(\n codec, 3, _set_planar_pixel_order(img[256:512, 256:512, :]),\n strm\n )\n\n openjp2.end_compress(codec, strm)", "def _OpenOutputFiles(self):\n self.gfile = open(self.geomout, \"w\")\n self.efile = open(self.energyout, \"w\")\n self.PrintEnergyHeader()", "def process(self):\n # Opening and preprocessing of the input file\n if self.options.mbtiles_fromdisk or self.options.mbtiles_todisk:\n if self.options.mbtiles_fromdisk:\n i_parm=10\n if self.options.mbtiles_todisk:\n i_parm=11\n if self.options.verbose:\n print \"GDAL2MbTiles :mbtiles from/to disk [\",i_parm,\"] mbtiles_fromdisk[\",self.options.mbtiles_fromdisk,\"] mbtiles_todisk[\",self.options.mbtiles_todisk,\"]\"\n self.mbtiles_setup(i_parm)\n return\n else:\n if self.options.verbose:\n print \"GDAL2MbTiles :tile creation mbtiles[\",self.options.mbtiles,\"]\"\n self.open_input()\n # Generation of main metadata files and HTML viewers\n self.generate_metadata()\n # Generation of the lowest tiles\n self.generate_base_tiles()\n # Generation of the overview tiles (higher in the pyramid)\n self.generate_overview_tiles()\n # Generating of KML\n self.generate_kml()", "def get_objects():\n\n # check whether this session is over its limit\n if 'tiles' not in session:\n session['tiles'] = 0\n\n print(\"tiles queried in session:\", session['tiles'])\n if session['tiles'] > MAX_TILES_SESSION:\n return \"-1\"\n\n # start time, get params\n type = request.form.get(\"type\")\n bounds = request.form.get(\"bounds\")\n height = float(request.form.get(\"height\"))\n width = float(request.form.get(\"width\"))\n zoom = int(request.form.get(\"zoom\"))\n # zoom = 16\n print(\" bounds:\", bounds)\n print(\" width:\", width)\n print(\" height:\", height)\n print(\" zoom:\", zoom)\n\n # cropping\n crop_tiles = False\n\n # create a map provider object\n map_object = GoogleMap(google_api_key)\n\n # divide map into tiles\n tiles, nx, ny, meters, h, w = map_object.make_tiles(bounds, crop_tiles=crop_tiles)\n tiles_overlap, nx_overlap, ny_overlap, meters_overlap, h_overlap, w_overlap = map_object.make_tiles(bounds, overlap_percent=2, crop_tiles=crop_tiles)\n print(f\" {len(tiles)} tiles, {nx} x {ny}, {meters} x {meters} m\")\n # print(\" Tile centers:\")\n # for c in tiles:\n # print(\" \",c)\n\n tiles = [t for t in tiles if 
ts_maps.check_tile_against_bounds(t, bounds)]\n for i, tile in enumerate(tiles):\n tile['id'] = i\n\n print(\" tiles left after viewport and polygon filter:\", len(tiles))\n\n if \"tmpdirname\" in session:\n rmtree(session['tmpdirname'], ignore_errors=True, onerror=None)\n print(\"cleaned up tmp dir\", session['tmpdirname'])\n del session['tmpdirname']\n\n # make a new tempdir name and attach to session\n tmpdir = tempfile.TemporaryDirectory()\n tmpdirname = tmpdir.name\n tmpfilename = tmpdirname[tmpdirname.rindex(\"/\")+1:]\n print(\"creating tmp dir\", tmpdirname)\n session['tmpdirname'] = tmpdirname\n tmpdir.cleanup()\n os.mkdir(tmpdirname)\n print(\"created tmp dir\", tmpdirname)\n\n # retrieve tiles and metadata if available\n meta = map_object.get_sat_maps(tiles, loop, tmpdirname, tmpfilename)\n session['metadata'] = meta\n print(\" asynchronously retrieved\", len(tiles), \"files\")\n\n # we create tiles at zoom=21, so factor the size by the current zoom\n zoom_factor = 2**21 / 2**zoom\n picHeight = 600 / zoom_factor # Resulting image height in pixels (x2 if scale parameter is set to 2)\n picWidth = 600/zoom_factor\n\n xScale = math.pow(2, zoom) / (picWidth/256)\n yScale = math.pow(2, zoom) / (picHeight/256)\n\n for i, tile in enumerate(tiles):\n tile['filename'] = tmpdirname+\"/\"+tmpfilename+str(i)+\".jpg\"\n tile['bounds'] = ts_imgutil.getImageBounds(tile['w'], tile['h'], xScale, yScale, tile['lat'], tile['lng'])\n\n if type == 'tiles':\n return json.dumps(tiles)\n elif type == 'classification':\n model_classification = Classification()\n tiles = model_classification.predict(tiles)\n return json.dumps(tiles)\n elif type == 'segmentation':\n model_classification = Classification()\n tiles = model_classification.predict(tiles)\n tiles_pred = list(filter(lambda x: x[\"prediction\"] == 1, tiles))\n if len(tiles_pred) > 0:\n model_segmentation = Segmentation()\n # our tiles for prediction are at zoom 21\n result_tiles = model_segmentation.predict(tiles_pred, 21)\n for i, tile in enumerate(tiles):\n if tile[\"id\"] in result_tiles:\n tiles[i] = result_tiles[tile[\"id\"]]\n if \"mask_url\" in tiles[i]:\n tiles[i][\"mask_url\"] = f\"/{tiles[i]['mask_url']}\"\n return json.dumps(tiles)", "def batch_export_ortho():\r\n global path_to_project\r\n \r\n for path in path_to_project:\r\n export_filename = os.path.basename(path['ProjectPath']).replace('.psz','.tif')\r\n export_path = os.path.join(export_folder,export_filename)\r\n try:\r\n project = PhotoScan.app.document\r\n project.open(path['ProjectPath'])\r\n \r\n dx, dy = mosaic.get_resolution(path['Flight_id'], path['Field'], path['Camera'])\r\n \r\n if dx is not None and dy is not None:\r\n status = project.activeChunk.exportOrthophoto(\r\n export_path, format=\"tif\", color_correction=False, blending='average', dx=dx, dy=dy,\r\n projection=project.activeChunk.projection)\r\n else:\r\n status = project.activeChunk.exportOrthophoto(export_path, format=\"tif\", color_correction=False, blending='average',projection=project.activeChunk.projection)\r\n except Exception as e:\r\n print(e)\r\n if status is True:\r\n print(\"Perfect\")\r\n app = PhotoScan.Application()\r\n app.quit()", "def test_lat_lon_to_tile(self):\n\n lat = 48\n lon = 37.7\n z = 10\n\n tile_calculated = geomath.lat_lon_to_tile(lat,lon,z)\n tile_known = (619,355,10)\n\n # make sure the tiles are the same\n self.assertEqual(tile_calculated,tile_known)", "def __init__(self):\n# This is the top container for all data. 
The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def copy_map_files(species: list, all_names: list, specific_names: list, point_locations: dict) -> None:\n def copy_file(filename: str) -> None:\n try:\n shutil.copy2(filename, WEBOUT_PATH + \"maps/\")\n except FileNotFoundError:\n report_error(\"Missing file: \" + filename)\n\n # def scour_svg(filename: str) -> None:\n # \"\"\"\n # run scour to reduce size of svg maps\n #\n # theoretically scour could be imported and run from within the code, but it is really not designed\n # to be run that way\n # \"\"\"\n # subprocess.Popen(\"scour -i \" + filename + \" -o \" + filename + \"z\").wait()\n\n # individual species maps\n for s in species:\n if s.status != \"fossil\":\n copy_file(TMP_MAP_PATH + rangemap_name(\"u_\" + s.species) + \".kmz\")\n copy_file(TMP_MAP_PATH + pointmap_name(\"u_\" + s.species) + \".kmz\")\n # scour_svg(TMP_MAP_PATH + rangemap_name(\"u_\" + s.species) + \".svg\")\n # scour_svg(TMP_MAP_PATH + pointmap_name(\"u_\" + s.species) + \".svg\")\n # copy_file(TMP_MAP_PATH + rangemap_name(\"u_\" + s.species) + \".svgz\")\n # copy_file(TMP_MAP_PATH + pointmap_name(\"u_\" + s.species) + \".svgz\")\n copy_file(TMP_MAP_PATH + rangemap_name(\"u_\" + s.species) + \".png\")\n copy_file(TMP_MAP_PATH + pointmap_name(\"u_\" + s.species) + \".png\")\n # combined map\n # copy_file(TMP_MAP_PATH + rangemap_name(\"fiddlers_all\") + \".kmz\")\n copy_file(TMP_MAP_PATH + pointmap_name(\"fiddlers_all\") + \".kmz\")\n copy_file(TMP_MAP_PATH + rangemap_name(\"fiddlers_all\") + \".png\")\n copy_file(TMP_MAP_PATH + pointmap_name(\"fiddlers_all\") + \".png\")\n\n # binomial maps\n for n in all_names:\n copy_file(TMP_MAP_PATH + pointmap_name(\"name_\" + name_to_filename(n)) + \".kmz\")\n # scour_svg(TMP_MAP_PATH + pointmap_name(\"name_\" + name_to_filename(n)) + \".svg\")\n # 
copy_file(TMP_MAP_PATH + pointmap_name(\"name_\" + name_to_filename(n)) + \".svgz\")\n copy_file(TMP_MAP_PATH + pointmap_name(\"name_\" + name_to_filename(n)) + \".png\")\n # specific name maps\n for n in specific_names:\n copy_file(TMP_MAP_PATH + pointmap_name(\"sn_\" + n.name) + \".kmz\")\n # scour_svg(TMP_MAP_PATH + pointmap_name(\"sn_\" + n.name) + \".svg\")\n # copy_file(TMP_MAP_PATH + pointmap_name(\"sn_\" + n.name) + \".svgz\")\n copy_file(TMP_MAP_PATH + pointmap_name(\"sn_\" + n.name) + \".png\")\n # point location maps\n for p in point_locations:\n if not point_locations[p].unknown:\n copy_file(TMP_MAP_PATH + pointmap_name(\"location_\" + place_to_filename(p)) + \".kmz\")", "def single_threaded_tiling(input_file, output_folder, options):\n if options.verbose:\n print(\"Begin tiles details calc\")\n conf, tile_details = worker_tile_details(input_file, output_folder, options)\n\n if options.verbose:\n print(\"Tiles details calc complete.\")\n\n if not options.verbose and not options.quiet:\n progress_bar = ProgressBar(len(tile_details))\n progress_bar.start()\n\n for tile_detail in tile_details:\n create_base_tile(conf, tile_detail, options)\n\n if not options.verbose and not options.quiet:\n progress_bar.log_progress()\n\n create_overview_tiles(conf, output_folder, options)\n\n shutil.rmtree(os.path.dirname(conf.src_file))", "def get_modis(tiles, save_path, months='', years=''):\n\n\n from bs4 import BeautifulSoup\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n\n mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n\n for tile in tiles:\n for yr in yrs:\n for m in mons:\n base_url = \"http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/\"\n\n dir_path = \"Y{:}/M{:}/\".format(yr, m)\n url = base_url + dir_path\n soup = BeautifulSoup(urllib2.urlopen(url), \"lxml\")\n hdf_name = soup.find_all('', {\n 'href': re.compile('MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.IGNORECASE)})\n files = urllib.urlretrieve(url + hdf_name[0].text, save_path + hdf_name[0].text)\n print(save_path + hdf_name[0].text)\n time.sleep(0.5)", "def _make_files(self):\n if not self.path.is_dir():\n raise FileNotFoundError(f\"Path {self.path} does not exist.\")\n\n # Make the filepaths\n self.file_points = self.path / \"point.dat\"\n self.file_lines = self.path / \"line.dat\"\n self.file_cadastre = self.path / \"cadastre.dat\"\n self.file_portals = self.path / \"portals.dat\"\n\n with open(self.file_points, \"w\") as f:\n # 2 lines ignored\n header = datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n f.write(header)\n self.points_dfs = []\n with open(self.file_lines, \"w\") as f:\n # 5 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + 3 * \"Generated: \\n\"\n + \"Name,Section,source_group,x1,y1,z1,x2,y2,z2,width,vert. 
ext.,-,-,\"\n \"emission_rate[kg/h/km],-,-,-,-\\n\"\n )\n f.write(header)\n with open(self.file_cadastre, \"w\") as f:\n # 1 line ignored\n header = \"x,y,z,dx,dy,dz,emission_rate[kg/h],-,-,-,source_group\\n\"\n f.write(header)\n with open(self.file_portals, \"w\") as f:\n # 2 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + \"x1,y1,x2,y2,z0,z1,emission_rate[kg/h],-,-,-,source_group\\n\"\n )\n f.write(header)\n\n\n # File to save the source groups values\n self.file_source_groups = self.path / \"source_groups.json\"\n with open(self.file_source_groups, \"w\") as f:\n # reverse the dict (items become keys and vice versa)\n reversed_source_groups = {v: k for k, v in self.source_groups.items()}\n json.dump(reversed_source_groups, f, indent=2)", "def gen_test_output(sess, logits, image_folder, image_pl, data_folder,\n learning_phase, image_shape, nw_shape):\n image_paths = glob(os.path.join(data_folder, image_folder, '*.png'))\n for image_file in image_paths[:5]:\n\n in_image = scipy.misc.imread(image_file, mode='RGB')\n image = scipy.misc.imresize(in_image, image_shape)\n\n street_im = get_seg_img(sess, logits, image_pl, image, image_shape,\n nw_shape, learning_phase)\n\n street_im = scipy.misc.imresize(street_im, in_image.shape)\n yield os.path.basename(image_file), np.array(street_im)", "def write_shapefile_combined(self, shpname):\r\n self.read_traveltime()\r\n \r\n westlats1 = []\r\n westlons1 = []\r\n eastlats1 = []\r\n eastlons1 = [] \r\n lines1 = []\r\n #### points ####\r\n lats1 = []\r\n lons1 = []\r\n for i in range(len(self.westPnts1)):\r\n westlat1, westlon1 = utm.to_latlon(self.westPnts1[i,0], self.westPnts1[i,1], 14, 'U')\r\n eastlat1, eastlon1 = utm.to_latlon(self.eastPnts1[i,0], self.eastPnts1[i,1], 14, 'U')\r\n lines1.append([[westlon1, westlat1], [eastlon1, eastlat1]])\r\n westlats1.append(westlat1)\r\n westlons1.append(westlon1)\r\n eastlats1.append(eastlat1)\r\n eastlons1.append(eastlon1)\r\n #### points ####\r\n lats1.append((westlat1+eastlat1)/2.)\r\n lons1.append((westlon1+eastlon1)/2.)\r\n \r\n \r\n \r\n \r\n \r\n westlats5 = []\r\n westlons5 = []\r\n eastlats5 = []\r\n eastlons5 = [] \r\n lines5 = []\r\n #### points ####\r\n lats5 = []\r\n lons5 = []\r\n for i in range(len(self.westPnts5)):\r\n westlat5, westlon5 = utm.to_latlon(self.westPnts5[i,0], self.westPnts5[i,1], 14, 'U')\r\n eastlat5, eastlon5 = utm.to_latlon(self.eastPnts5[i,0], self.eastPnts5[i,1], 14, 'U')\r\n lines5.append([[westlon5, westlat5], [eastlon5, eastlat5]])\r\n westlats5.append(westlat5)\r\n westlons5.append(westlon5)\r\n eastlats5.append(eastlat5)\r\n eastlons5.append(eastlon5)\r\n #### points ####\r\n lats5.append((westlat5+eastlat5)/2.)\r\n lons5.append((westlon5+eastlon5)/2.)\r\n \r\n \r\n Narray_branch1 = len(self.inarrays_branch1)\r\n Narray_branch5 = len(self.inarrays_branch5)\r\n \r\n #### travel time for branch 1\r\n Ttime = self.inarrays_branch1[0][:,2]\r\n ind0 = np.nonzero(Ttime)[0][0]\r\n ind = np.arange(ind0, Ttime.shape[0])\r\n \r\n\r\n branchIDs_branch1 = []\r\n SegIDs_branch1 = []\r\n lines_branch1 = []\r\n westlats_branch1 = []\r\n westlons_branch1 = []\r\n eastlats_branch1 = []\r\n eastlons_branch1 = []\r\n lats_branch1 = []\r\n lons_branch1 = []\r\n Ttimes_branch1 = []\r\n Density_branch1 = []\r\n Initial_loc_branch1 = []\r\n solubility_branch1 = []\r\n flow_condition_branch1 = []\r\n concentration_branch1 = []\r\n water_level_branch1 = []\r\n dist_branch1 = []\r\n \r\n \r\n \r\n for iarray in range(Narray_branch1):\r\n \r\n 
#### find indexes which segment has travel time\r\n Ttime_tem = self.inarrays_branch1[iarray][:,2]\r\n ind0 = np.nonzero(Ttime_tem)[0][0]\r\n ind = np.arange(ind0, Ttime_tem.shape[0])\r\n \r\n for i in range(self.inarrays_branch1[0].shape[0]):\r\n \r\n if i in ind:\r\n branchIDs_branch1.append(self.inarrays_branch1[iarray][i,0])\r\n SegIDs_branch1.append(self.inarrays_branch1[iarray][i,1])\r\n lines_branch1.append(lines1[i])\r\n westlats_branch1.append(westlats1[i])\r\n westlons_branch1.append(westlons1[i])\r\n eastlats_branch1.append(eastlats1[i])\r\n eastlons_branch1.append(eastlons1[i])\r\n lats_branch1.append(lats1[i])\r\n lons_branch1.append(lons1[i])\r\n \r\n Ttimes_branch1.append(self.inarrays_branch1[iarray][i,2])\r\n if self.inarrays_branch1[iarray][i,3] == 0:\r\n Density_branch1.append('Light')\r\n elif self.inarrays_branch1[iarray][i,3] == 1:\r\n Density_branch1.append('Heavy')\r\n elif self.inarrays_branch1[iarray][i,3] == 9:\r\n Density_branch1.append('None')\r\n \r\n Initial_loc_branch1.append('East')\r\n \r\n if self.inarrays_branch1[iarray][i,5] == 0:\r\n solubility_branch1.append('Insoluble')\r\n elif self.inarrays_branch1[iarray][i,5] == 1:\r\n solubility_branch1.append('Soluble')\r\n \r\n if self.inarrays_branch1[iarray][i,6] == 3:\r\n #flow_condition_branch1.append('High')\r\n flow_condition_branch1.append('> 945 cfs')\r\n elif self.inarrays_branch1[iarray][i,6] == 2:\r\n #flow_condition_branch1.append('Medium')\r\n flow_condition_branch1.append('110 ~ 945 cfs')\r\n elif self.inarrays_branch1[iarray][i,6] == 1:\r\n #flow_condition_branch1.append('Low')\r\n flow_condition_branch1.append('< 110 cfs')\r\n \r\n if self.inarrays_branch1[iarray][i,7] != 0:\r\n concentration_branch1.append(\"{:.3E}\".format(Decimal(self.inarrays_branch1[iarray][i,7])))\r\n else:\r\n concentration_branch1.append(str(self.inarrays_branch1[iarray][i,7]))\r\n \r\n water_level_branch1.append(self.inarrays_branch1[iarray][i,8])\r\n dist_branch1.append(self.inarrays_branch1[iarray][i,9])\r\n \r\n #### travel time for branch 5\r\n #Ttime = self.inarrays_particle_branch5[0][:,2]\r\n #ind1 = np.arange(43, 45) -1 #### hard coded, for release in branch 5\r\n #ind5 = np.nonzero(Ttime)[0]\r\n \r\n \r\n branchIDs_branch5 = []\r\n SegIDs_branch5 = []\r\n lines_branch5 = []\r\n westlats_branch5 = []\r\n westlons_branch5 = []\r\n eastlats_branch5 = []\r\n eastlons_branch5 = []\r\n lats_branch5 = []\r\n lons_branch5 = []\r\n Ttimes_branch5 = []\r\n Density_branch5 = []\r\n Initial_loc_branch5 = []\r\n solubility_branch5 = []\r\n flow_condition_branch5 = []\r\n concentration_branch5 = []\r\n water_level_branch5 = []\r\n dist_branch5 = []\r\n \r\n \r\n ## loop over all travel time for each array, find which is in branch 1 and which is in branch 5\r\n for iarray in range(Narray_branch5): \r\n \r\n #### find indexes which segment has travel time\r\n Ttime_tem = self.inarrays_branch5[iarray][:,2]\r\n \r\n nbr5 = len(lines5) ## number of elements in branch 5\r\n ind1 = np.arange(43, 45) -1 + nbr5 #### hard coded, for release in branch 5 len(branch5)+ [43,44] - 1\r\n ind5 = np.nonzero(Ttime_tem)[0]\r\n \r\n for i in range(self.inarrays_branch5[0].shape[0]):\r\n #if iarray==6 and i == 44:\r\n # pdb.set_trace()\r\n if self.inarrays_branch5[iarray][i,0] == 5: ## at branch 5\r\n \r\n if i in ind5:\r\n branchIDs_branch5.append(self.inarrays_branch5[iarray][i,0])\r\n SegIDs_branch5.append(self.inarrays_branch5[iarray][i,1])\r\n lines_branch5.append(lines5[i])\r\n westlats_branch5.append(westlats5[i])\r\n 
westlons_branch5.append(westlons5[i])\r\n eastlats_branch5.append(eastlats5[i])\r\n eastlons_branch5.append(eastlons5[i])\r\n lats_branch5.append(lats5[i])\r\n lons_branch5.append(lons5[i])\r\n \r\n Ttimes_branch5.append(self.inarrays_branch5[iarray][i,2])\r\n if self.inarrays_branch5[iarray][i,3] == 0:\r\n Density_branch5.append('Light')\r\n elif self.inarrays_branch5[iarray][i,3] == 1:\r\n Density_branch5.append('Heavy')\r\n elif self.inarrays_branch5[iarray][i,3] == 9:\r\n Density_branch5.append('None')\r\n \r\n if self.inarrays_branch5[iarray][i,4] == 1:\r\n Initial_loc_branch5.append('East')\r\n elif self.inarrays_branch5[iarray][i,4] == 5:\r\n Initial_loc_branch5.append('West')\r\n \r\n if self.inarrays_branch5[iarray][i,5] == 0:\r\n solubility_branch5.append('Insoluble')\r\n elif self.inarrays_branch5[iarray][i,5] == 1:\r\n solubility_branch5.append('Soluble')\r\n \r\n if self.inarrays_branch5[iarray][i,6] == 3:\r\n flow_condition_branch5.append('> 945 cfs')\r\n elif self.inarrays_branch5[iarray][i,6] == 2:\r\n flow_condition_branch5.append('110 ~ 945 cfs')\r\n elif self.inarrays_branch5[iarray][i,6] == 1:\r\n flow_condition_branch5.append('< 110 cfs')\r\n \r\n if self.inarrays_branch5[iarray][i,7] != 0:\r\n concentration_branch5.append(\"{:.3E}\".format(Decimal(self.inarrays_branch5[iarray][i,7])))\r\n else: \r\n concentration_branch5.append(str(self.inarrays_branch5[iarray][i,7]))\r\n \r\n water_level_branch5.append(self.inarrays_branch5[iarray][i,8])\r\n dist_branch5.append(self.inarrays_branch5[iarray][i,9])\r\n \r\n \r\n elif self.inarrays_branch5[iarray][i,0] == 1: ## at branch 1\r\n \r\n if i in ind1:\r\n \r\n branchIDs_branch5.append(self.inarrays_branch5[iarray][i,0])\r\n SegIDs_branch5.append(self.inarrays_branch5[iarray][i,1])\r\n lines_branch5.append(lines1[i-nbr5])\r\n westlats_branch5.append(westlats1[i-nbr5])\r\n westlons_branch5.append(westlons1[i-nbr5])\r\n eastlats_branch5.append(eastlats1[i-nbr5])\r\n eastlons_branch5.append(eastlons1[i-nbr5])\r\n lats_branch5.append(lats1[i-nbr5])\r\n lons_branch5.append(lons1[i-nbr5])\r\n \r\n Ttimes_branch5.append(self.inarrays_branch5[iarray][i,2])\r\n if self.inarrays_branch5[iarray][i,3] == 0:\r\n Density_branch5.append('Light')\r\n elif self.inarrays_branch5[iarray][i,3] == 1:\r\n Density_branch5.append('Heavy')\r\n elif self.inarrays_branch5[iarray][i,3] == 9:\r\n Density_branch5.append('None')\r\n \r\n if self.inarrays_branch5[iarray][i,4] == 1:\r\n Initial_loc_branch5.append('East')\r\n elif self.inarrays_branch5[iarray][i,4] == 5:\r\n Initial_loc_branch5.append('West')\r\n \r\n if self.inarrays_branch5[iarray][i,5] == 0:\r\n solubility_branch5.append('Insoluble')\r\n elif self.inarrays_branch5[iarray][i,5] == 1:\r\n solubility_branch5.append('Soluble')\r\n \r\n if self.inarrays_branch5[iarray][i,6] == 3:\r\n flow_condition_branch5.append('> 945 cfs')\r\n elif self.inarrays_branch5[iarray][i,6] == 2:\r\n flow_condition_branch5.append('110 ~ 945 cfs')\r\n elif self.inarrays_branch5[iarray][i,6] == 1:\r\n flow_condition_branch5.append('< 110 cfs')\r\n \r\n if self.inarrays_branch5[iarray][i,7] != 0:\r\n concentration_branch5.append(\"{:.3E}\".format(Decimal(self.inarrays_branch5[iarray][i,7])))\r\n else:\r\n concentration_branch5.append(str(self.inarrays_branch5[iarray][i,7]))\r\n \r\n water_level_branch5.append(self.inarrays_branch5[iarray][i,8])\r\n dist_branch5.append(self.inarrays_branch5[iarray][i,9])\r\n \r\n\r\n #### combine all data into one big array\r\n branchIDs_combined = branchIDs_branch1 + 
branchIDs_branch5\r\n SegIDs_combined = SegIDs_branch1 + SegIDs_branch5 \r\n lines_combined = lines_branch1 + lines_branch5\r\n# westlats_combined = westlats_branch1 + westlats_branch5\r\n# westlons_combined = westlons_branch1 + westlons_branch5\r\n# eastlats_combined = eastlats_branch1 + eastlats_branch5\r\n# eastlons_combined = eastlons_branch1 + eastlons_branch5\r\n lats_combined = lats_branch1 + lats_branch5\r\n lons_combined = lons_branch1 + lons_branch5\r\n \r\n Ttimes_combined = Ttimes_branch1 + Ttimes_branch5\r\n Density_combined = Density_branch1 + Density_branch5\r\n Initial_loc_combined = Initial_loc_branch1 + Initial_loc_branch5\r\n solubility_combined = solubility_branch1 + solubility_branch5\r\n flow_combined = flow_condition_branch1 + flow_condition_branch5\r\n concentration_combined = concentration_branch1 + concentration_branch5 \r\n water_level_combined = water_level_branch1 + water_level_branch5 \r\n dist_combined = dist_branch1 + dist_branch5\r\n \r\n #### Create the shapefile\r\n # Create the projection\r\n spatialReference = osgeo.osr.SpatialReference()\r\n spatialReference.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')\r\n \r\n # Create the shape file\r\n outfile = r'ArcGIS_online\\%s'%shpname\r\n driver = osgeo.ogr.GetDriverByName('ESRI Shapefile')\r\n shapeData = driver.CreateDataSource(outfile)\r\n \r\n # Create the layer\r\n layer = shapeData.CreateLayer('Contour', spatialReference, osgeo.ogr.wkbPoint)\r\n layerDefinition = layer.GetLayerDefn()\r\n \r\n # Create fields containing segment infos\r\n field_def = osgeo.ogr.FieldDefn('BranchID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('SegID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lon', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lat', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n #field_def = osgeo.ogr.FieldDefn('Lon_east', osgeo.ogr.OFTReal)\r\n #layer.CreateField(field_def)\r\n \r\n #field_def = osgeo.ogr.FieldDefn('Lat_east', osgeo.ogr.OFTReal)\r\n #layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('T (day)', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n ## density - type: string, option: light-0, heavey-1 \r\n field_def = osgeo.ogr.FieldDefn('Density', osgeo.ogr.OFTString)\r\n layer.CreateField(field_def)\r\n \r\n ## initial release location - type: string, option: East-1, West-5\r\n field_def = osgeo.ogr.FieldDefn('Initial', osgeo.ogr.OFTString)\r\n layer.CreateField(field_def)\r\n \r\n ## solubility\r\n field_def = osgeo.ogr.FieldDefn('Solubility', osgeo.ogr.OFTString)\r\n layer.CreateField(field_def)\r\n \r\n ## flow condition\r\n field_def = osgeo.ogr.FieldDefn('Flow', osgeo.ogr.OFTString)\r\n layer.CreateField(field_def)\r\n \r\n ## concentration\r\n field_def = osgeo.ogr.FieldDefn('C (mg/L)', osgeo.ogr.OFTString)\r\n layer.CreateField(field_def)\r\n \r\n ## water surface elevation\r\n field_def = osgeo.ogr.FieldDefn('WSE (ft)', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n ## distance to WTP gate\r\n field_def = osgeo.ogr.FieldDefn('D (ft)', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n \r\n def add_feature(layer, branchID, segs, lines, lon, lat, Ttime, density, Initial_loc, solubility, flows, concentration, water_level, dist):\r\n \"\"\"\r\n function that adds feature to layer\r\n \"\"\" \r\n ctr=0\r\n for i in range(len(lines)):\r\n 
ctr+=1\r\n point = osgeo.ogr.Geometry(osgeo.ogr.wkbPoint)\r\n # Add points individually to the line\r\n #xy = lines[i]\r\n \r\n #line.AddPoint_2D(xy[0][0],xy[0][1])\r\n #line.AddPoint_2D(xy[1][0],xy[1][1])\r\n point.AddPoint(lon[i], lat[i])\r\n # Update the feature with the line data\r\n featureIndex = ctr\r\n feature = osgeo.ogr.Feature(layerDefinition)\r\n #feature.SetStyleString(\"PEN(c:r,w:5px)\") \r\n feature.SetGeometry(point)\r\n feature.SetFID(featureIndex)\r\n feature.SetGeometryDirectly(point)\r\n \r\n # Set the attribute table\r\n feature.SetField('BranchID', int(branchID[i])) \r\n feature.SetField('SegID', int(segs[i])) # convert to int() is necessary, osgeo cannot recognize numpy int32 type\r\n feature.SetField('Lon', \"{:.3f}\".format(lon[i]))\r\n feature.SetField('Lat', \"{:.3f}\".format(lat[i]))\r\n #feature.SetField('Lon_east', \"{:.3f}\".format(eastlon[i]))\r\n #feature.SetField('Lat_east', \"{:.3f}\".format(eastlat[i]))\r\n feature.SetField('T (day)', int(Ttime[i]))\r\n feature.SetField('Density', density[i])\r\n feature.SetField('Initial', Initial_loc[i])\r\n feature.SetField('Solubility', solubility[i])\r\n feature.SetField('Flow', flows[i])\r\n feature.SetField('C (mg/L)', concentration[i])\r\n feature.SetField('WSE (ft)', water_level[i])\r\n feature.SetField('D (ft)', dist[i])\r\n \r\n layer.CreateFeature(feature)\r\n \r\n \r\n add_feature(layer, branchIDs_combined, SegIDs_combined, lines_combined, \\\r\n lons_combined, lats_combined,\\\r\n Ttimes_combined, Density_combined, Initial_loc_combined, solubility_combined, \\\r\n flow_combined, concentration_combined, water_level_combined, dist_combined)", "def img_to_tiles(cls, tiff_path, region, res, tile, tile_date_path, img_format, mp):\n\n # Get metadata from original image\n metadata = TiffMetadata(tiff_path)\n\n WIDTH, HEIGHT = region.calculate_width_height(res)\n ultra_large = False\n if WIDTH * HEIGHT > 2 * Image.MAX_IMAGE_PIXELS:\n ultra_large = True\n\n # Use the following dictionary to get the coordinates of each tile\n geoTran_d = TileUtils.getGeoTransform(tiff_path)\n\n # Check for valid tiling dimensions\n if (tile.width > WIDTH or tile.height > HEIGHT):\n raise argparse.ArgumentTypeError(\"Tiling dimensions greater than image dimensions\")\n\n # Determine the number of tiles per row and column\n if tile.handling == Handling.discard_incomplete_tiles:\n num_rows = (HEIGHT - tile.height * tile.overlap) // (tile.height * (1 - tile.overlap))\n num_cols = (WIDTH - tile.width * tile.overlap) // (tile.width * (1 - tile.overlap))\n else:\n num_rows = math.ceil((HEIGHT - tile.height * tile.overlap) / (tile.height * (1 - tile.overlap)))\n num_cols = math.ceil((WIDTH - tile.width * tile.overlap) / (tile.width * (1 - tile.overlap)))\n\n num_iterations = num_rows * num_cols\n \n # Find the pixel coordinate extents of each tile to be generated\n print(\"Gathering tiling information...\", end=\"\", flush=True)\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((metadata, tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols)), list(range(num_iterations)))\n pixel_coords = pool.map(getTilingSplitCoordsMP, args)\n else:\n pixel_coords = []\n for index in range(num_iterations):\n pixel_coords.append(getTilingSplitCoordsTuple(metadata,tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols, index))\n print(\"done!\")\n\n if mp:\n print(\"Generating {} tiles using {} processes...\".format(len(pixel_coords), NUM_CORES), flush=True)\n else:\n print(\"Generating {} tiles 
sequentially...\".format(len(pixel_coords)), flush=True)\n\n if ultra_large: \n # Create the intermediate tiles\n inter_dir, img_width, img_height = TileUtils.img_to_intermediate_images(tiff_path, tile, WIDTH, HEIGHT, metadata.date, img_format)\n\n # Add each coordinate to its proper list\n intermediate_files = [f for f in os.listdir(inter_dir) if f.endswith(img_format)]\n\n # Get the tiling information for all intermediate tiles\n intermediate_info = TileUtils.getIntermediateTilingInfo(tile, pixel_coords, WIDTH, HEIGHT, img_width, img_height, intermediate_files)\n\n # Tile the complete images\n print(\"\\tTiling from complete images\")\n for single_inter_imgs in tqdm(intermediate_info[0]):\n filename = single_inter_imgs[0][0]\n inter_metadata = IntermediateMetadata(filename)\n\n img_path = os.path.join(inter_dir, filename)\n src = Image.open(img_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n \n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format,), kwds={\"inter_x\":(x - inter_metadata.start_x), \"inter_y\":(y - inter_metadata.start_y)}) for (filename, x, y, done_x, done_y, path) in single_inter_imgs]\n f = [p.get() for p in multi]\n pool.close()\n pool.join()\n else: \n for filename, x, y, done_x, done_y, path in single_inter_imgs:\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, inter_x=(x - inter_metadata.start_x), inter_y=(y - inter_metadata.start_y), img_arr=img_arr)\n\n # Close the image\n src.close()\n # Tile in between two images\n print(\"\\tTiling between two images\")\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[1])\n result = list(tqdm(pool.imap(processDoublesMP, args), total=len(intermediate_info[1])))\n else:\n for double_inter_imgs in tqdm(intermediate_info[1]):\n processDoublesTuple(tile.width, tile.height, inter_dir, img_format, double_inter_imgs)\n \n # Tile in between four images\n print(\"\\tTiling between four images\")\n if mp:\n # Use half as many processes as cores to ensure not running out of available mem and getting stuck\n with Pool(processes=(NUM_CORES // 2)) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[2])\n result = list(tqdm(pool.imap(processQuadsMP, args), total=len(intermediate_info[2])))\n else:\n for quad_inter_imgs in tqdm(intermediate_info[2]):\n processQuadsTuple(tile.width, tile.height, inter_dir, img_format, quad_inter_imgs)\n shutil.rmtree(inter_dir)\n else: \n # Open image as a numpy array in order to tile from the array\n src = Image.open(tiff_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n\n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, 
None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format)) for (x, y, done_x, done_y, path) in pixel_coords]\n f = [p.get() for p in tqdm(multi)]\n pool.close()\n pool.join()\n else:\n for x, y, done_x, done_y, path in tqdm(pixel_coords):\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, img_arr=img_arr)\n \n # Close the image\n src.close()\n print(\"done!\")", "def _save_tracker_output(seq: Sequence, tracker: Tracker, output: dict):\n\n if not os.path.exists(tracker.results_dir):\n print(\"create tracking result dir:\", tracker.results_dir)\n os.makedirs(tracker.results_dir)\n if seq.dataset in ['trackingnet', 'got10k']:\n if not os.path.exists(os.path.join(tracker.results_dir, seq.dataset)):\n os.makedirs(os.path.join(tracker.results_dir, seq.dataset))\n '''2021.1.5 create new folder for these two datasets'''\n if seq.dataset in ['trackingnet', 'got10k']:\n base_results_path = os.path.join(tracker.results_dir, seq.dataset, seq.name)\n else:\n base_results_path = os.path.join(tracker.results_dir, seq.name)\n\n def save_bb(file, data):\n tracked_bb = np.array(data).astype(int)\n np.savetxt(file, tracked_bb, delimiter='\\t', fmt='%d')\n\n def save_time(file, data):\n exec_times = np.array(data).astype(float)\n np.savetxt(file, exec_times, delimiter='\\t', fmt='%f')\n\n def save_score(file, data):\n scores = np.array(data).astype(float)\n np.savetxt(file, scores, delimiter='\\t', fmt='%.2f')\n\n def _convert_dict(input_dict):\n data_dict = {}\n for elem in input_dict:\n for k, v in elem.items():\n if k in data_dict.keys():\n data_dict[k].append(v)\n else:\n data_dict[k] = [v, ]\n return data_dict\n\n for key, data in output.items():\n # If data is empty\n if not data:\n continue\n\n if key == 'target_bbox':\n if isinstance(data[0], (dict, OrderedDict)):\n data_dict = _convert_dict(data)\n\n for obj_id, d in data_dict.items():\n bbox_file = '{}_{}.txt'.format(base_results_path, obj_id)\n save_bb(bbox_file, d)\n else:\n # Single-object mode\n bbox_file = '{}.txt'.format(base_results_path)\n save_bb(bbox_file, data)\n\n if key == 'all_boxes':\n if isinstance(data[0], (dict, OrderedDict)):\n data_dict = _convert_dict(data)\n\n for obj_id, d in data_dict.items():\n bbox_file = '{}_{}_all_boxes.txt'.format(base_results_path, obj_id)\n save_bb(bbox_file, d)\n else:\n # Single-object mode\n bbox_file = '{}_all_boxes.txt'.format(base_results_path)\n save_bb(bbox_file, data)\n\n if key == 'all_scores':\n if isinstance(data[0], (dict, OrderedDict)):\n data_dict = _convert_dict(data)\n\n for obj_id, d in data_dict.items():\n bbox_file = '{}_{}_all_scores.txt'.format(base_results_path, obj_id)\n save_score(bbox_file, d)\n else:\n # Single-object mode\n print(\"saving scores...\")\n bbox_file = '{}_all_scores.txt'.format(base_results_path)\n save_score(bbox_file, data)\n\n elif key == 'time':\n if isinstance(data[0], dict):\n data_dict = _convert_dict(data)\n\n for obj_id, d in data_dict.items():\n timings_file = '{}_{}_time.txt'.format(base_results_path, obj_id)\n save_time(timings_file, d)\n else:\n timings_file = '{}_time.txt'.format(base_results_path)\n save_time(timings_file, data)", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not 
os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def simulation_export(request, simulation):\n\n seed = np.random.randint(10000)\n dir = '{0}/website_files/exports/{1}'.format(settings.BASE_DIR, seed)\n os.makedirs(dir)\n\n files_names = []\n\n files_names.append(object_export_save(simulation, 'centroid', dir))\n files_names.append(object_export_save(simulation, 'crossing', dir))\n files_names.append(object_export_save(simulation, 'link', dir))\n files_names.append(object_export_save(simulation, 'function', dir))\n files_names.append(public_transit_export_save(simulation, dir))\n files_names.append(pricing_export_save(simulation, dir))\n\n\n demandsegments = get_query('demandsegment', simulation)\n for demandsegment in demandsegments:\n files_names.append(matrix_export_save(simulation, demandsegment, dir))\n\n\n\n #Need to add parameters file here\n\n zipname = '{0}'.format(str(simulation))\n\n s = BytesIO()\n\n file = zipfile.ZipFile(s, 'w')\n\n for f in files_names:\n # Calculate path for file in zip\n fdir, fname = os.path.split(f)\n zip_path = os.path.join(zipname, fname)\n\n # Add file, at correct path\n file.write(f, zip_path)\n\n file.close()\n\n # Grab ZIP file from in-memory, make response with correct MIME-type\n response = HttpResponse(s.getvalue())\n response['content_type'] = 'application/x-zip-compressed'\n # ..and correct content-disposition\n response['Content-Disposition'] = 'attachment; filename={0}.zip'.format(str(simulation))\n\n shutil.rmtree(dir, ignore_errors=True)\n\n return response", "def create_temp_output_paths() -> None:\n if not os.path.exists(TMP_PATH):\n os.makedirs(TMP_PATH)\n if not os.path.exists(TMP_MAP_PATH):\n os.makedirs(TMP_MAP_PATH)", "def write_multfile(image_coords, source_z, file_name = 'multfile.in'):\n print 'write_multfile'\n file_in = open(file_name, 'w')\n file_in.write('#REFERENCE 3 0.0 0.0\\n')\n\n for i in range(len(image_coords)):\n image_id = 'A' + str(i+1) + ' '\n data = str(image_coords[i][0]) + ' ' + str(image_coords[i][1]) \\\n + str(' 0.2 0.2 0 ') + str(source_z) + ' 0'\n final = image_id + data + '\\n'\n file_in.write(final)\n file_in.close()", "def build():\n if not os.path.exists(ZIP_DATASET_PATH):\n raise RuntimeError('Unable to find the zip dataset at %s' % ZIP_DATASET_PATH)\n\n # Extracting\n extract_dir = os.path.join(os.path.dirname(ZIP_DATASET_PATH), 'zip_dataset')\n if not os.path.exists(extract_dir):\n LOGGER.info('... 
Extracting files from zip dataset.')\n with zipfile.ZipFile(ZIP_DATASET_PATH, 'r') as zip_dataset:\n zip_dataset.extractall(extract_dir)\n\n # Additional information we also want to store\n map_object = Map()\n all_powers = get_map_powers(map_object)\n sc_to_win = len(map_object.scs) // 2 + 1\n\n hash_table = {} # zobrist_hash: [{game_id}/{phase_name}]\n moves = {} # Moves frequency: {move: [nb_no_press, nb_press]}\n nb_phases = OrderedDict() # Nb of phases per game\n end_scs = {'press': {power_name: {nb_sc: [] for nb_sc in range(0, sc_to_win + 1)} for power_name in all_powers},\n 'no_press': {power_name: {nb_sc: [] for nb_sc in range(0, sc_to_win + 1)} for power_name in all_powers}}\n\n # Building\n dataset_index = {}\n LOGGER.info('... Building HDF5 dataset.')\n with multiprocessing.Pool() as pool:\n with h5py.File(DATASET_PATH, 'w') as hdf5_dataset, open(PROTO_DATASET_PATH, 'wb') as proto_dataset:\n\n for json_file_path in glob.glob(extract_dir + '/*.jsonl'):\n LOGGER.info('... Processing: %s', json_file_path)\n category = json_file_path.split('/')[-1].split('.')[0]\n dataset_index[category] = set()\n\n # Processing file using pool\n with open(json_file_path, 'r') as json_file:\n lines = json_file.read().splitlines()\n for game_id, saved_game_zlib in tqdm(pool.imap_unordered(process_game, lines), total=len(lines)):\n if game_id is None:\n continue\n saved_game_proto = zlib_to_proto(saved_game_zlib, SavedGameProto)\n\n # Saving to disk\n hdf5_dataset[game_id] = np.void(saved_game_zlib)\n write_proto_to_file(proto_dataset, saved_game_proto, compressed=False)\n dataset_index[category].add(game_id)\n\n # Recording additional info\n get_end_scs_info(saved_game_proto, game_id, all_powers, sc_to_win, end_scs)\n get_moves_info(saved_game_proto, moves)\n nb_phases[game_id] = len(saved_game_proto.phases)\n\n # Recording hash of each phase\n for phase in saved_game_proto.phases:\n hash_table.setdefault(phase.state.zobrist_hash, [])\n hash_table[phase.state.zobrist_hash] += ['%s/%s' % (game_id, phase.name)]\n\n # Storing info to disk\n with open(DATASET_INDEX_PATH, 'wb') as file:\n pickle.dump(dataset_index, file, pickle.HIGHEST_PROTOCOL)\n with open(END_SCS_DATASET_PATH, 'wb') as file:\n pickle.dump(end_scs, file, pickle.HIGHEST_PROTOCOL)\n with open(HASH_DATASET_PATH, 'wb') as file:\n pickle.dump(hash_table, file, pickle.HIGHEST_PROTOCOL)\n with open(MOVES_COUNT_DATASET_PATH, 'wb') as file:\n pickle.dump(moves, file, pickle.HIGHEST_PROTOCOL)\n with open(PHASES_COUNT_DATASET_PATH, 'wb') as file:\n pickle.dump(nb_phases, file, pickle.HIGHEST_PROTOCOL)\n\n # Deleting extract_dir\n LOGGER.info('... Deleting extracted files.')\n if os.path.exists(extract_dir):\n shutil.rmtree(extract_dir, ignore_errors=True)\n LOGGER.info('... 
Done building HDF5 dataset.')", "def download_map_area():\n filename = \"data.osm\"\n if CONFIG[\"SELECTION\"] == \"CACHE\":\n if not os.path.exists(filename):\n raise ValueError(\"Cannot use SELECTION=CACHE if no {} file exists.\".format(filename))\n else:\n return None, filename, os.path.getsize(filename)\n elif CONFIG[\"SELECTION\"] == \"PRESELECTED\":\n data = CONFIG[\"TEMPLATE\"].format(*CONFIG[\"PRESELECTIONS\"][CONFIG[\"PRESELECTION\"]])\n elif CONFIG[\"SELECTION\"] == \"USER\":\n data = CONFIG[\"TEMPLATE\"].format(*CONFIG[\"USER_SELECTION\"])\n else:\n raise ValueError(\"SELECTION={}\".format(CONFIG[\"SELECTION\"]))\n \n #Get XML data\n r = requests.get('http://overpass-api.de/api/interpreter', params={\"data\": data}, stream=True)\n with open(filename, 'wb') as fobj:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk:\n fobj.write(chunk)\n return r.status_code, filename, os.path.getsize(filename)", "def main():\n\ttile_dimensions = (512, 512)\n\ttile_count = (5, 4) \n\timage = Image.new(\"RGB\", (tile_dimensions[0] * tile_count[0], tile_dimensions[1] * tile_count[1]), None)\n\n\ttry:\n\t\turl = sys.argv[1]\n\texcept:\n\t\tprint \"Please enter a Google Maps URL as a parameter.\"\n\t\tprint \"Usage: $ python get_pano.py https://www.google.com/maps/@34.1027387,-118.340471,3a,75y,32.1h,87.53t/data=!3m7!1e1!3m5!1soInaTCic7TsAAAQDMaZ31A!2e0!3e2!7i13312!8i6656\"\n\t\treturn\n\n\ttry:\n\t\tprint \"************************************\"\n\t\tprint \"Fetching images from Google Maps, this could take some time...\"\n\t\tregex = re.compile(r'panoid\\=([^&]*)', re.I)\n\t\tpano_id = regex.findall(urllib.unquote(url))[0]\n\t\tfor y in range(tile_count[1]):\n\t\t\tfor x in range(tile_count[0]):\n\t\t\t\timg_url = \"https://geo2.ggpht.com/cbk?cb_client=maps_sv.tactile&authuser=0&hl=en&panoid=\" + pano_id + \"&output=tile&x=\"+str(x)+\"&y=\"+str(y)+\"&zoom=3&nbt&fover=2\"\n\t\t\t\tresponse = urllib2.urlopen(img_url)\n\t\t\t\tfile_data = StringIO(response.read())\n\t\t\t\timage.paste(Image.open(file_data), (x * tile_dimensions[0], y * tile_dimensions[1]))\n\t\t\n\t\tfilename = \"pano-\" + pano_id + \".jpg\"\n\t\timage.save(filename)\n\t\tprint \"Success, image saved as \\033[96m\" + filename + \"\\033[00m\"\n\t\tprint \"************************************\"\n\n\n\texcept Exception as e:\n\t\tprint \"Sorry something broke.\"\n\t\tprint e", "def write_maps(self):\n if np.allclose(self.xmap.origin, 0):\n ext = \"ccp4\"\n else:\n ext = \"mrc\"\n\n for q, coor, b in zip(self._occupancies, self._coor_set, self._bs):\n self.conformer.q = q\n self.conformer.coor = coor\n self.conformer.b = b\n self._transformer.density()\n fname = os.path.join(self.directory_name, f\"model.{ext}\")\n self._transformer.xmap.tofile(fname)\n self._transformer.xmap.array -= self.xmap.array\n fname = os.path.join(self.directory_name, f\"diff.{ext}\")\n self._transformer.xmap.tofile(fname)\n self._transformer.reset(full=True)", "def download_tile(map_layer, zoom, x, y):\n try:\n tile_url = map_layer.get_tile_url(zoom, x, y)\n tmp_file, headers = urllib.request.urlretrieve(tile_url)\n return (x, y), tmp_file\n except URLError as e:\n app.logger.info(\"Error downloading tile x={}, y={}, z={} for layer {}: {}\".format(\n x, y, zoom, map_layer, e.reason))\n return (x, y), pkg_resources.resource_filename(\"geos\", \"static/empty_tile.png\")", "def extract():\n queries = querylist_builder()\n \n pathlib.Path('/tmp/street_data').mkdir(parents=True, exist_ok=True) \n for i,q in enumerate(queries):\n print(\"running extract 
query\")\n url = ENDPOINT + \"?CommandData=\" + q\n print(url)\n r = requests.get(url)\n text_file = open(\"/tmp/street_data/\" + str(i) + \".xml\", 'w')\n data = r.text\n print(data)\n text_file.write(data) \n print(\"data saved for {}\".format(str(i)))\n text_file.close()", "def _list_outputs(self):\n outputs = self._outputs().get()\n\n out_dir = os.path.abspath(os.path.join(os.getcwd(), \"slicesdir\"))\n outputs[\"out_dir\"] = out_dir\n outputs[\"out_files\"] = [\n self._gen_fname(\n basename=f.replace(os.sep, \"_\"),\n cwd=out_dir,\n ext=self.inputs.out_extension,\n )\n for f in self.inputs.in_files\n ]\n return outputs", "def _generate_output(self):\n raise NotImplementedError()", "def save_to_png(self, tiles, output_dir, channel=None):\n plt.ioff()\n\n for idx, tile in enumerate(tiles):\n save_path = f\"{output_dir}/tile_{idx}\"\n fig = self.build_fig()\n img = np.moveaxis(tile, 0, 2)\n \n if channel is None: \n plt.imshow(img)\n plt.savefig(save_path)\n plt.close()\n else:\n plt.imshow(img[:, :, channel])\n plt.savefig(save_path)\n plt.close()\n\n print(\"done converting to png!\")", "def _save_data(self, outputs, latents):\n list(map(lambda e, v: e.append(v), self.curr_patches, outputs))\n list(map(lambda e, v: e.append(v), self.curr_latents, latents))\n self.count_patches += len(self.curr_patches[0][-1])\n\n while self.count_patches >= self.patches_ref:\n aux_patches = list(map(lambda e: tf.concat(e, axis=0),\n self.curr_patches))\n aux_latents = list(map(lambda e: tf.concat(e, axis=0),\n self.curr_latents))\n\n data_name = str((self.out_folder / str(Folders.RAW_DATA))\n / self.curr_img.stem)\n for lvl, (aux_p, aux_l) in enumerate(zip(aux_patches, aux_latents)):\n patches, remaining = tf.split(\n aux_p, [self.patches_ref, len(aux_p) - self.patches_ref],\n axis=0)\n self.curr_patches[lvl] = [remaining]\n latent, remaining = tf.split(\n aux_l, [self.patches_ref, len(aux_l) - self.patches_ref],\n axis=0)\n self.curr_latents[lvl] = [remaining]\n\n patch_size = len(patches[0])\n with Image.open(self.curr_img) as img:\n # width, height -> height, width (to be compatible with\n # arrays)\n img_size = np.array(img.size)[::-1]\n p_size = np.ceil(img_size / patch_size).astype(int)\n\n img = tf.reshape(\n patches, [*p_size, *self.m_cfg['configs']['patch']])\n img = tf.transpose(img, [0, 2, 1, 3, 4])\n img = tf.reshape(img, [*patch_size * p_size, 3])\n img = tf.image.convert_image_dtype(img, tf.uint8)\n img = img[:img_size[0], :img_size[1], :]\n\n latent_name = data_name + '_latent' + str(lvl)\n serialized = tf.io.serialize_tensor(latent)\n tf.io.write_file(latent_name, serialized)\n\n img_name = data_name + '_' + str(lvl) + self.curr_img.suffix\n tf.io.write_file(img_name, tf.image.encode_png(\n img, compression=0))\n\n with open(Path(data_name).with_suffix('.txt'), 'w') as file:\n file.write(str(self.curr_img))\n\n self.count_patches = len(self.curr_patches[0][0])\n if len(self.img_paths):\n self.curr_img = Path(self.img_paths.pop(0))\n self.patches_ref = self.calc_n_patches(\n self.curr_img, self.m_cfg['configs']['patch'][0])", "def __generate_output_data(self):\n if not len(self.output_data) == 0:\n return\n try:\n self.output_data = s.load(open('output/output_data.p', 'rb'))\n self.class_indices = s.load(open('output/class_indices.p', 'rb'))\n if not self.classes_to_visualise == None:\n self.__filter_output_data(self.classes_to_visualise)\n except:\n self.output_data = generate_output_for_test_data(image_data=self.image_data,\n binary_output=self.binary_output) if self.testing else 
generate_output_for_train_data(\n image_data=self.image_data, binary_output=self.binary_output)\n self.class_indices = get_all_class_indices(training=False) if self.testing else get_all_class_indices()\n if not self.classes_to_visualise == None:\n self.__filter_output_data(self.classes_to_visualise)\n s.dump([out.tolist() for out in self.output_data], open('output/output_data.p', 'wb'))\n s.dump(self.class_indices, open('output/class_indices.p', 'wb'))\n\n self.legend = get_class_names_for_class_indices(list(set(sorted(self.class_indices))))", "def render_tiles(bbox, config, tile_dir, min_zoom=DEFAULT_MIN_ZOOM, max_zoom=DEFAULT_MAX_ZOOM, process_count=DEFAULT_PROCESS_COUNT):\n if not os.path.isdir(tile_dir):\n os.mkdir(tile_dir)\n\n tile_projection = GoogleProjection(max_zoom) \n\n ll0 = (bbox[1], bbox[0])\n ll1 = (bbox[3], bbox[2])\n\n tile_queue = multiprocessing.JoinableQueue()\n\n for zoom in range(min_zoom, max_zoom + 1):\n px0 = tile_projection.fromLLtoPixel(ll0, zoom)\n px1 = tile_projection.fromLLtoPixel(ll1, zoom)\n\n tile_x1 = int(px0[0] / 256.0)\n tile_x2 = int(px1[0] / 256.0) + 1\n tile_y1 = int(px0[1] / 256.0)\n tile_y2 = int(px1[1] / 256.0) + 1\n\n zoom_dir = os.path.join(tile_dir, str(zoom))\n\n if not os.path.isdir(zoom_dir):\n os.mkdir(zoom_dir)\n\n for tile_x in range(tile_x1, tile_x2):\n # Validate x coordinate\n if (tile_x < 0) or (tile_x >= 2**zoom):\n continue\n\n x_dir = os.path.join(zoom_dir, str(tile_x))\n\n if not os.path.isdir(x_dir):\n os.mkdir(x_dir)\n\n for tile_y in range(tile_y1, tile_y2):\n # Validate y coordinate\n if (tile_y < 0) or (tile_y >= 2**zoom):\n continue\n\n filename = os.path.join(x_dir, '%s.png' % str(tile_y))\n\n # Submit tile to be rendered into the queue\n t = (filename, tile_x, tile_y, zoom)\n tile_queue.put(t)\n\n print 'Using %i processes to render %i tiles' % (process_count, tile_queue.qsize())\n\n processes = []\n\n for i in range(process_count):\n renderer = Renderer(tile_queue, config)\n renderer.start()\n\n processes.append(renderer)\n\n try:\n tile_queue.join()\n except KeyboardInterrupt:\n for p in processes:\n p.terminate()", "def _create_outputs(self) -> ComponentOutputs:\n raise NotImplementedError", "def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names", "def save_outputs(self):\n write_pickled(join(self.output_folder, \"results.pkl\"), self.get_results())" ]
[ "0.65990293", "0.64210165", "0.6350539", "0.6310586", "0.6223159", "0.6142363", "0.6141133", "0.61198944", "0.60979927", "0.60793155", "0.605626", "0.6051542", "0.6041125", "0.5997875", "0.59818053", "0.5976445", "0.5960446", "0.5947027", "0.5920985", "0.5904805", "0.58695346", "0.5826246", "0.58229274", "0.57859474", "0.5784554", "0.5781388", "0.57667375", "0.5741898", "0.5717632", "0.5698302", "0.5682029", "0.56808096", "0.5673413", "0.56599385", "0.5620443", "0.56134874", "0.5607624", "0.5580844", "0.55531394", "0.55181324", "0.5515566", "0.5492691", "0.5490821", "0.5483614", "0.54643285", "0.5453081", "0.5449275", "0.5446304", "0.5434608", "0.5428889", "0.54224133", "0.54146725", "0.54094684", "0.54074717", "0.5401739", "0.53988457", "0.5390782", "0.5390782", "0.538395", "0.53787094", "0.53681046", "0.5367999", "0.5362539", "0.5356755", "0.5326303", "0.5311486", "0.5299081", "0.5297423", "0.52952373", "0.52943134", "0.52932024", "0.52905184", "0.52905184", "0.528398", "0.5278897", "0.5274261", "0.526649", "0.52649534", "0.525953", "0.52369225", "0.52348864", "0.5232578", "0.5223537", "0.52231526", "0.52204144", "0.5214867", "0.5211587", "0.52103204", "0.52089834", "0.52037686", "0.520079", "0.5196731", "0.51869476", "0.518693", "0.51774573", "0.51755476", "0.51730937", "0.5168592", "0.5168375", "0.5159007", "0.5151666" ]
0.0
-1
Create a DataFrame containing information on all tiles identified as downloadable
def shapely_tileset(processed_query,min_ovp = 0,max_ovp = 1, n_neg = None,buffer = 0): types, xx, yy, qual, tags = [],[],[],[],[] z = processed_query['zoom'] for elem in processed_query['elements']: for tile in elem['tiles']: qq = tile[1] if qq >= min_ovp and qq <= max_ovp: x,y,_ = find_tile_coords(tile[0],z) xx.append(x) yy.append(y) qual.append(tile[1]) tags.append(json.dumps(elem['tags'])) types.append(elem['type']) pos_df = pd.DataFrame({ 'z': z, 'x' : xx, 'y': yy, 'entity': types, 'overlap': qual,'tags': tags, 'placename': processed_query['query_info']['placename'] }) \ .drop_duplicates(subset = ['x','y']) \ .sort_values(by = ['x','y']) if n_neg is None: n_neg = pos_df.shape[0] negt = sample_complement(pos_df['x'],pos_df['y'],n_neg,buffer) neg_df = pd.DataFrame({'z': z,'x': negt[0],'y': negt[1]}) \ .sort_values(by = ['x','y']) return { 'positive': add_latlon(pos_df), 'negative': add_latlon(neg_df) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_tiles(x_index = None, y_index = None):\n db_cursor2 = self.db_connection.cursor()\n\n sql = \"\"\"-- Check for any existing tiles\nselect\n tile_id,\n x_index,\n y_index,\n tile_type_id,\n tile_pathname,\n dataset_id,\n tile_class_id,\n tile_size\nfrom tile_footprint\ninner join tile using(x_index, y_index, tile_type_id)\nwhere (%(x_index)s is null or x_index = %(x_index)s)\n and (%(y_index)s is null or y_index = %(y_index)s)\n and tile_type_id = %(tile_type_id)s\n and dataset_id = %(fc_dataset_id)s\n\n and ctime is not null -- TODO: Remove this after reload\n;\n\"\"\"\n params = {'x_index': x_index,\n 'y_index': y_index,\n 'tile_type_id': tile_type_info['tile_type_id'],\n 'fc_dataset_id': dataset_info['fc_dataset_id']}\n \n log_multiline(logger.debug, db_cursor2.mogrify(sql, params), 'SQL', '\\t')\n db_cursor2.execute(sql, params)\n tile_info = {}\n for record in db_cursor2:\n tile_info_dict = {\n 'x_index': record[1],\n 'y_index': record[2],\n 'tile_type_id': record[3],\n 'tile_pathname': record[4],\n 'dataset_id': record[5],\n 'tile_class_id': record[6],\n 'tile_size': record[7]\n }\n tile_info[record[0]] = tile_info_dict # Keyed by tile_id\n \n log_multiline(logger.debug, tile_info, 'tile_info', '\\t')\n return tile_info", "def get_modis(tiles, save_path, months='', years=''):\n\n\n from bs4 import BeautifulSoup\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n\n mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n\n for tile in tiles:\n for yr in yrs:\n for m in mons:\n base_url = \"http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/\"\n\n dir_path = \"Y{:}/M{:}/\".format(yr, m)\n url = base_url + dir_path\n soup = BeautifulSoup(urllib2.urlopen(url), \"lxml\")\n hdf_name = soup.find_all('', {\n 'href': re.compile('MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.IGNORECASE)})\n files = urllib.urlretrieve(url + hdf_name[0].text, save_path + hdf_name[0].text)\n print(save_path + hdf_name[0].text)\n time.sleep(0.5)", "def tile_data(self) -> List[TileData]:\n return self._tiles.values()", "def fixture_tile_list():\n return {\n \"version\": 1,\n \"revision\": 1,\n \"timestamp\": \"2018-06-19T23:04:32.442Z\",\n \"timestamp_ms\": 1529449472442,\n \"result_code\": 0,\n \"result\": [\n {\n \"tileType\": \"TILE\",\n \"user_uuid\": TILE_USER_UUID,\n \"tile_uuid\": TILE_TILE_UUID,\n \"other_user_uuid\": \"\",\n \"other_user_email\": TILE_EMAIL,\n \"mode\": \"OWNER\",\n \"last_modified_timestamp\": 1482711833985,\n }\n ],\n }", "def __init__tiles__(self):\n return [[Tiles(i, j, Tiles.closed) for j in range(self.cols)] for i in range(self.rows)]", "def save_tiles(df,output_dir,namefunc = None):\n if not isinstance(df,pd.core.frame.DataFrame):\n raise TypeError(\"df must be a pandas DataFrame!\")\n if any(e not in df.columns for e in ('z','x','y')):\n raise ValueError(\"df must have columns x, y, and z\")\n if namefunc is None:\n def namefunc(x,y,z):\n return f'{z}_{x}_{y}.png'\n\n opath = os.path.abspath(os.path.expanduser(output_dir))\n Path(opath).mkdir(parents=True, exist_ok=True)\n L = df.shape[0]\n flocs = [''] * L\n for i,xyz in enumerate(zip(df['x'],df['y'],df['z'])):\n x,y,z = xyz\n print(f\"({i+1} of {L})...\")\n sleep(0.75)\n outloc = os.path.join(opath,namefunc(x,y,z))\n if save_tile(x,y,z,outloc) == 0:\n flocs[i] = outloc\n df = df.assign(file_loc = flocs)\n return df[df['file_loc'] != '']", "def get_news():\n # empty dataframe\n df = 
pd.DataFrame() \n # read each url in list\n for url in inshorts_urls(): \n # add each dataframe of cards to df\n df = pd.concat([df, get_article(url)])\n # return all urls' cards\n return df", "def get_downloadable_data(url_list):\n downloadable_data_list = []\n for url in url_list:\n soup = visit_homepage(url)\n for link in soup.find_all(class_='resource-url-analytics'):\n downloadable_data_list.append(link['href'])\n return downloadable_data_list", "def dataset_grabber(sess, link):\n json_dict = sess.get(link).json()\n if '.geojson' in link:\n dataset = gpd.GeoDataFrame.from_features(json_dict['features'])\n else:\n dataset = pd.DataFrame(json_dict)\n return dataset", "def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();", "def _download_tile_wrapper(args):\n return download_tile(*args)", "def get_route_tile_matrix_url(self) -> pd.DataFrame:\n\n if os.path.isfile('route_collection.json'):\n with open('route_collection.json') as f:\n colrow_collection = json.load(f)\n else:\n raise Exception('route_collection.json does not exist, try using server.util.register_route_tile_matrix_url()') \n ## last step, return a pandas matrix\n matrix = pd.DataFrame(index = range(1), columns = range(len(colrow_collection)))\n i = 0\n for item in colrow_collection:\n matrix.iloc[0,i] = self.get_traffic_json_resource(location_data = item, location_type = \"colrow\", zoom = 14)\n i += 1\n\n return matrix", "def update_cache():\n total = isic.get_image_count()\n df = pd.DataFrame()\n for meta in tqdm(isic.get_image_meta(), total=total, desc=\"Updating image list cache\", unit=\"img\"):\n _meta = json_normalize(meta)\n # File name and directory will be named after dataset.name.\n # Make it file system friendly. e.g. remove chars like /\n # Todo: This append stuff is painfully slow... 
Do something about it\n _meta[\"dataset.name\"] = slugify(_meta[\"dataset.name\"][0])\n df = df.append(_meta, ignore_index=True)\n df.to_csv(cache_file, index=False)", "def open_tiles(self):\n return list(filter(None, self.empty))", "def download_all(conn, logger):\n # setup slices, 24 in total\n slices = [f'year{x}month{y}' for x in [2, 1] for y in range(12, 0, -1)]\n for slice in slices:\n download_intraday_extended(conn, logger, slice)", "def fetch(url: str, cache: str) -> pd.DataFrame:\n r = requests.get(url)\n r.raise_for_status()\n datestamp = date.today().strftime('%Y%m%d')\n name = url.split('/')[-1].replace('.csv','')\n os.makedirs(cache, exist_ok=True)\n filename = os.path.join(cache, f\"{datestamp}_{name}.csv\")\n with open(filename, \"w\") as f:\n f.write(r.text)\n return pd.read_csv(filename)", "def __local_df(soup):\n return __get_local_g1_news(soup)", "def __local_df(soup):\n return __get_local_g1_news(soup)", "def download_data(base_url,\n lista_anni,\n lista_inquinanti):\n \n # Inizializziamo la lista dei df ognuno dei quali corrisponde ad un inquinante\n df_template = pd.DataFrame(columns=['jd','h','1','2','3','4','5','6','7','8','9','10','11','13','14','15','16','38','39','40',\n '41','45','47','48','49','55','56','57','60','83','84','85','86','87','Anno','Inquinante'])\n lista_df = [df_template]\n\t\n\t# Per ogni inquinante\n for chimico in lista_inquinanti:\n \t# Per ogni anno\n for anno in lista_anni:\n print('Retrieving {} for year {} from {}'.format(chimico, anno, compose_url(base_url, anno, chimico)))\n \n # Esegui la richiesta\n r = requests.get(compose_url(base_url, anno, chimico))\n\n # Crea il rispettivo dataframe\n df = write_response(r)\n print('{} rows'.format(len(df)))\n\t\t\t\n\t\t\t# Prendi la linea che corrisponde all'header del df\n columns_ = df.iloc[0].index[0]\n \n \"\"\" Individua i nomi delle colonne splittando la stringa che li contiene tutti\n ed escludendo lestringhe vuote ottenute tramite lo split\"\"\"\n clean_columns = [item.strip()\\\n for item in columns_.split(' ')\\\n if len(item)!=0]\n \n # aggiungo le colonne Anno e Inquinante\n columns = clean_columns + ['Anno', 'Inquinante']\n\t\t\t\n list_rows = []\n # Per ogni linea del df\n for line_idx in range(1, len(df)):\n \t\n # Come nel caso precedente splitto la linea per ottenere le diverse celle\n line = df.iloc[line_idx].values[0].strip().split(' ')\n \n # Quindi ottengo la lista delle celle della riga i-th\n raw_line = [item for item in line if len(item)!=0] \n \n # Aggiungiamo le colonne anno e inquinante\n list_rows += [raw_line + [anno, chimico]]\n\t\t\t\n\t\t\t# Definiamo il nuovo dataset \n df_idx = pd.DataFrame(list_rows, columns=columns)\n \n # Creiamo aggiungiamo alla lista di df da concatenare quello appena creato \n lista_df += [df_idx]\n\n\t# Facciamo la union dei df tenendo conto che le colonne possono essere diverse (concat con pandas)\n df_final = pd.concat(lista_df, ignore_index=False)\n\n # sostituisco i NaN e -999.0 con un valore vuoto\n df_final = df_final.fillna('')\n df_final = df_final.replace(to_replace='-999.0', value='')\n \n return df_final", "def mbtiles(ctx, files, output, overwrite, title, description,\n layer_type, img_format, tile_size, zoom_levels, image_dump,\n num_workers, src_nodata, dst_nodata, resampling):\n output, files = resolve_inout(files=files, output=output,\n overwrite=overwrite)\n inputfile = files[0]\n\n logger = logging.getLogger('rio-mbtiles')\n\n with ctx.obj['env']:\n\n # Read metadata from the source dataset.\n with 
rasterio.open(inputfile) as src:\n\n validate_nodata(dst_nodata, src_nodata, src.profile.get('nodata'))\n base_kwds = {'dst_nodata': dst_nodata, 'src_nodata': src_nodata}\n\n if src_nodata is not None:\n base_kwds.update(nodata=src_nodata)\n\n if dst_nodata is not None:\n base_kwds.update(nodata=dst_nodata)\n\n # Name and description.\n title = title or os.path.basename(src.name)\n description = description or src.name\n\n # Compute the geographic bounding box of the dataset.\n (west, east), (south, north) = transform(\n src.crs, 'EPSG:4326', src.bounds[::2], src.bounds[1::2])\n\n # Resolve the minimum and maximum zoom levels for export.\n if zoom_levels:\n minzoom, maxzoom = map(int, zoom_levels.split('..'))\n else:\n zw = int(round(math.log(360.0 / (east - west), 2.0)))\n zh = int(round(math.log(170.1022 / (north - south), 2.0)))\n minzoom = min(zw, zh)\n maxzoom = max(zw, zh)\n\n logger.debug(\"Zoom range: %d..%d\", minzoom, maxzoom)\n\n # Parameters for creation of tile images.\n base_kwds.update({\n 'driver': img_format.upper(),\n 'dtype': 'uint8',\n 'nodata': 0,\n 'height': tile_size,\n 'width': tile_size,\n 'count': 3,\n 'crs': TILES_CRS})\n\n img_ext = 'jpg' if img_format.lower() == 'jpeg' else 'png'\n\n # Initialize the sqlite db.\n if os.path.exists(output):\n os.unlink(output)\n # workaround for bug here: https://bugs.python.org/issue27126\n sqlite3.connect(':memory:').close()\n\n conn = sqlite3.connect(output)\n cur = conn.cursor()\n cur.execute(\n \"CREATE TABLE tiles \"\n \"(zoom_level integer, tile_column integer, \"\n \"tile_row integer, tile_data blob);\")\n cur.execute(\n \"CREATE TABLE metadata (name text, value text);\")\n\n # Insert mbtiles metadata into db.\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"name\", title))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"type\", layer_type))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"version\", \"1.1\"))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"description\", description))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"format\", img_ext))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"bounds\", \"%f,%f,%f,%f\" % (west, south, east, north)))\n\n conn.commit()\n\n # Create a pool of workers to process tile tasks.\n pool = Pool(num_workers, init_worker,\n (inputfile, base_kwds, resampling), 100)\n\n # Constrain bounds.\n EPS = 1.0e-10\n west = max(-180 + EPS, west)\n south = max(-85.051129, south)\n east = min(180 - EPS, east)\n north = min(85.051129, north)\n\n # Initialize iterator over output tiles.\n tiles = mercantile.tiles(\n west, south, east, north, range(minzoom, maxzoom + 1))\n\n for tile, contents in pool.imap_unordered(process_tile, tiles):\n\n if contents is None:\n logger.info(\"Tile %r is empty and will be skipped\", tile)\n continue\n\n # MBTiles has a different origin than Mercantile/tilebelt.\n tiley = int(math.pow(2, tile.z)) - tile.y - 1\n\n # Optional image dump.\n if image_dump:\n img_name = '%d-%d-%d.%s' % (\n tile.x, tiley, tile.z, img_ext)\n img_path = os.path.join(image_dump, img_name)\n with open(img_path, 'wb') as img:\n img.write(contents)\n\n # Insert tile into db.\n cur.execute(\n \"INSERT INTO tiles \"\n \"(zoom_level, tile_column, tile_row, tile_data) \"\n \"VALUES (?, ?, ?, ?);\",\n (tile.z, tile.x, tiley, buffer(contents)))\n\n conn.commit()\n\n conn.close()\n # Done!", "def targets_to_dataframe(conn):\n 
return connect_database.get_table_into_pandas('target_info',conn)", "def downloadData(url : str, descriptor : str):\n assets = datapackage.Package(url).resources\n\n for data in filter(lambda x: x.tabular and x.descriptor['name'] == descriptor, assets):\n response = requests.get(data.descriptor['path'])\n return io.StringIO(response.content.decode('utf-8'))", "def download_data(self):\n headers = {'User-Agent': 'Mozilla/5.0',}\n\n #Request for html data of url page\n r = requests.get(self.url, headers = headers, allow_redirects=True)\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n #Checking if folder path exists, if not, creats it\n i=0\n while i<len(self.folder)-1:\n if self.folder[i] == '/':\n if not os.path.isdir(self.folder[:i]):\n os.mkdir(self.folder[:i])\n i+=1\n if i==len(self.folder)-1:\n if not os.path.isdir(self.folder):\n os.mkdir(self.folder)\n\n # if not os.path.isdir(self.folder):\n # os.mkdir(self.folder)\n\n #Gets every href to zip file with data\n entries = []\n for link in soup.find_all('a'):\n if re.search(\"^data/.*.zip\", link.get('href')):\n entries.append(link.get('href'))\n\n #Gets the newest dataset\n self.getCurrentData(entries)\n\n i=0\n #Saves each file in dataset\n for list in self.ListOfZipFiles:\n if not os.path.isfile(self.folder+list[4:]):\n r = requests.get(self.url+list)\n open(self.folder+list[4:], 'wb').write(r.content)\n #deletes prefix \"data/\"\n self.ListOfZipFiles[i] = list[4:]\n i+=1", "def download_all(): #@save\n for name in DATA_HUB:\n download(name)", "def copy_tiles(self):\n \n return self.tiles", "def download_data(self):\n content = requests.get(self.TOP_250_LIST)\n soup = BeautifulSoup(content.content, 'lxml')\n movies = soup.select('tbody.lister-list tr')\n for m in movies:\n title_column = m.select('td.titleColumn')\n link = self.format_link(title_column[0].a['href'])\n title = self.format_title(title_column[0].a.string.encode('utf-8'))\n path = 'pages/{}.html'.format(title)\n if os.path.isfile(path):\n continue\n response = requests.get(link)\n with open(path, 'wr') as f:\n f.write(response.content)", "def extract(path):\n\n #extracting campus entry leave times, time in school and different wifi router seen in city\n df_campus_entry_leave_times=get_campus_entry_leave_times(path)\n df_diff_wifi_seen= get_diff_wifi_seen(path)\n\n\n #merging the data frames\n df_wifi_features= dataprocessor.merge([df_campus_entry_leave_times, df_diff_wifi_seen], ['ID', 'Date'])\n return df_wifi_features", "def download_report():\n entities = get_names()\n save_csv(entities)", "def extract_details( session_requests, job_id ):\n \n url_prefix = CONFIG[\"url_prefix\"]\n \n #Extract html from web\n url = CONFIG[\"url_jobno\"] + str(job_id)\n tree = scrape_html(session_requests, url)\n \n #Extact description\n title = \"; \".join(tree.xpath(\"//p[@class='listheader']/text()\"))\n description = \"; \".join(tree.xpath(\"//p//text()\")) #more than one element\n \n #Extract files\n num_file = int(tree.xpath(\"count(//p[contains(text(),'Job Description Document :')]//a)\"))\n loop_range = min(num_file, (MAX_NUM_OF_FILES - 1))\n \n file_link = [\"NA\"] * MAX_NUM_OF_FILES\n file_name = [\"NA\"] * MAX_NUM_OF_FILES\n down_file_name = [\"NA\"] * MAX_NUM_OF_FILES\n \n if (num_file > (MAX_NUM_OF_FILES - 1)):\n file_link[(MAX_NUM_OF_FILES - 1)] = \"More than 9 files\"\n file_name[(MAX_NUM_OF_FILES - 1)] = \"More than 9 files\"\n \n for i in range(loop_range):\n file_link[i] = url_prefix + tree.xpath(\"//p[contains(text(),'Job Description Document :')]//a/@href\")[i]\n 
file_name[i] = tree.xpath(\"//p[contains(text(),'Job Description Document :')]//a/text()\")[i]\n \n ext = find_file_extention(file_name[i])\n down_file_name[i] = download_file(session_requests, file_link[i], job_id, i, ext)\n \n # dataframe\n row_names_link = init_file_dataframe()[1]\n row_names_name = init_file_dataframe()[2]\n row_names_down = init_file_dataframe()[3]\n \n df_link = np.transpose(pd.DataFrame(file_link, row_names_link))\n df_name = np.transpose(pd.DataFrame(file_name, row_names_name))\n df_down = np.transpose(pd.DataFrame(down_file_name, row_names_down))\n \n df_file = pd.DataFrame(data = {\"job_title\": [title], \"description\": [description], \"num_of_file\": [loop_range]})\n df_file = pd.concat([df_file.reset_index(drop=True), df_link], axis=1, sort=False)\n df_file = pd.concat([df_file.reset_index(drop=True), df_name], axis=1, sort=False)\n df_file = pd.concat([df_file.reset_index(drop=True), df_down], axis=1, sort=False)\n \n return df_file", "def get_pybossa_df(obj):\n progress = tqdm.tqdm(desc='Downloading', unit=obj)\n r = get_objects(obj)\n last_fetched = r.json()\n data = last_fetched\n progress.update(len(last_fetched))\n respect_rate_limits(r, progress)\n while _not_exhausted(last_fetched):\n r = get_objects(obj, len(data))\n last_fetched = r.json()\n data += last_fetched\n progress.update(len(last_fetched))\n respect_rate_limits(r, progress)\n progress.close()\n df = pandas.DataFrame(data)\n df.set_index('id', inplace=True, verify_integrity=True)\n return df", "def extract_table( session_requests, tree ):\n \n num_row = int(tree.xpath(\"count(//table[@class='tbl']//tr)\")) - 1\n \n name = [\"NA\"] * num_row\n jobtype = [\"NA\"] * num_row\n post_dt = [\"NA\"] * num_row\n close_dt = [\"NA\"] * num_row\n job_number = [-1] * num_row\n \n df_file = pd.DataFrame()\n \n for i in range(num_row):\n #table data\n name[i] = \"; \".join(tree.xpath(\"//table[@class='tbl']//tr[\" + str(i + 2) + \"]/td[1]/text()\"))\n jobtype[i] = \"; \".join(tree.xpath(\"//table[@class='tbl']//tr[\" + str(i + 2) + \"]/td[last()-2]/text()\"))\n post_dt[i] = \"; \".join(tree.xpath(\"//table[@class='tbl']//tr[\" + str(i + 2) + \"]/td[last()-1]/text()\"))\n close_dt[i] = \"; \".join(tree.xpath(\"//table[@class='tbl']//tr[\" + str(i + 2) + \"]/td[last()]/text()\"))\n \n #description\n try:\n job_title = tree.xpath(\"//table[@class='tbl']//tr[\" + str(i + 2) + \"]/td[2]/a[1]/@onclick\")[0]\n #print(i)\n \n index_start = job_title.find(\"jobno\")\n index_end = job_title.find(\"'\",index_start)\n job_number[i] = int(job_title[index_start+6:index_end])\n \n df_new = extract_details(session_requests, job_number[i])\n df_file = df_file.append(df_new, ignore_index = True, sort=False)\n \n except IndexError:\n job_number[i] = -1\n df_file = df_file.append(pd.Series(), ignore_index = True, sort=False)\n print(\"Error on item: \" + str(i + 1) + \" !!!!!!!!!!\")\n \n \n df_description = pd.DataFrame(data = {\n \"name\": name, \"jobtype\": jobtype, \"post_dt\": post_dt, \"close_dt\": close_dt,\n \"job_number\": job_number})\n \n df = pd.concat([df_description.reset_index(drop=True), df_file], axis=1, sort=False)\n \n return df", "def download_tile(self, xtile, ytile):\n location = 'http://maps.six.nsw.gov.au/arcgis/rest/services/public/NSW_Imagery/MapServer/tile/'\n destination = 'downloaded_tiles/'\n save_name = str(self.zoom_level) + '_' + str(xtile) + '_' + str(ytile)\n tile_url = location + save_name.replace('_', '/')\n tile = requests.get(tile_url, stream=True)\n with open(destination + save_name + 
'.png', 'wb') as out_file:\n tile.raw.decode_content = True\n shutil.copyfileobj(tile.raw, out_file)\n tilepng = png.Reader(file=tile.raw)\n # shutil.copyfileobj(tilepng, out_file)\n del tile", "def download_dataset(self):\n raise NotImplementedError", "def fetch_save_data_for_region(reg_url):\n # get region page\n region_page = requests.get(reg_url)\n region_name = reg_url.split(\"/\")[-1].split(\".\")[0]\n region_dir = \"data/\" + region_name\n # store the data files\n if not os.path.exists(region_dir):\n os.makedirs(region_dir)\n \n print(\"starting process for\", reg_url)\n \n soup = BeautifulSoup(region_page.text, \"html.parser\")\n # create metadata array\n rows = soup.select(\"table#hor-minimalist-b tr\")\n metadata_array = [get_info_from_row(rows[i]) for i in range(1, len(rows))]\n metadata_array = [m for m in metadata_array if m is not None]\n metadata_lookup = {m[\"id\"]:m for m in metadata_array if m is not None}\n \n data_urls = [m[\"href\"] for m in metadata_array]\n for d in data_urls:\n download_file(d, region_dir+\"/\")\n print(\"len data urls:\", len(data_urls))\n\n # unzip all the files\n unzip_dir = os.getcwd() + \"/\" + region_dir\n for item in os.listdir(unzip_dir): # loop through items in dir\n if item.endswith(\".zip\"): # check for \".zip\" extension\n file_name = unzip_dir + \"/\" + item # get full path of files\n #print(file_name)\n try:\n zip_ref = zipfile.ZipFile(file_name) # create zipfile object\n zip_ref.extractall(unzip_dir) # extract file to dir\n except:\n print(\"unzip, error with file\", file_name)\n zip_ref.close() # close file\n \n \n # load dataframes individually, concat, save\n data_files = [f for f in os.listdir(region_dir) if f.endswith(\".csv\")]\n print(\"len data files\", len(data_files))\n\n data_frames = []\n for d in data_files:\n try:\n new_frame = pd.read_csv(region_dir + \"/\" + d) \n id_ = d.replace(\".csv\", \" \").replace(\"_\", \" \").strip()\n metadata = metadata_lookup[id_]\n new_frame[\"desc\"] = metadata[\"desc\"]\n new_frame[\"type\"] = metadata[\"type\"]\n new_frame[\"med_ec\"] = metadata[\"med_ec\"]\n new_frame[\"flow\"] = metadata[\"flow\"]\n new_frame[\"lat\"] = metadata[\"lat\"]\n new_frame[\"lon\"] = metadata[\"lon\"]\n \n data_frames.append(new_frame) \n except:\n print(\"load csv, error with file\", d)\n data = pd.concat(data_frames)\n print(\"data shape\", data.shape)\n data.to_csv(\"processed_data/\" + region_name +\".csv\", index=False)", "def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. 
\n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' )) # then navigate back up to the base directory", "def get_tile_info(file_info, img_info):\n all_tiles = []\n new_tiles = {}\n if img_info['invert_x']:\n xmin = img_info['width']*img_info['scale'] - img_info['viewer']['right']\n xmax = img_info['width']*img_info['scale'] - img_info['viewer']['left']\n else:\n xmin = img_info['viewer']['left']\n xmax = img_info['viewer']['right']\n if img_info['invert_y']:\n ymin = img_info['height']*img_info['scale'] - img_info['viewer']['bottom']\n ymax = img_info['height']*img_info['scale'] - img_info['viewer']['top']\n else:\n ymin = img_info['viewer']['top']\n ymax = img_info['viewer']['bottom']\n minCol = int(max(1,math.floor(xmin/file_info['tile_width'])))-1\n maxCol=int(min(img_info['columns'],math.ceil(xmax/file_info['tile_width'])))\n minRow = int(max(1,math.floor(ymin/file_info['tile_height'])))-1\n maxRow = int(min(img_info['rows'],math.ceil(ymax/file_info['tile_height'])))\n \n block_width = int(math.ceil(file_info['tile_width']/img_info['scale']))\n block_height = int(math.ceil(file_info['tile_height']/img_info['scale']))\n \n for row in range(minRow,maxRow):\n y0 = row*file_info['tile_height']\n yf = (row+1)*file_info['tile_height']\n y0_idx = int(y0/img_info['scale'])\n yf_idx = min(y0_idx + block_height, img_info['height'])\n for col in range(minCol,maxCol):\n all_tiles.append(str(col)+','+str(row))\n tile_idx = str(col)+','+str(row)\n if (tile_idx not in img_info['tiles'] or \n 'loaded' not in img_info['tiles'][tile_idx] or\n not img_info['tiles'][tile_idx]['loaded']):\n x0 = col*file_info['tile_width']\n xf = (col+1)*file_info['tile_width']\n x0_idx = int(x0/img_info['scale'])\n xf_idx = min(x0_idx+block_width, img_info['width'])\n tile_width = int((xf_idx-x0_idx)*img_info['scale'])\n tile_height = int((yf_idx-y0_idx)*img_info['scale'])\n new_filepath = get_tile_filename(\n file_info, img_info, x0_idx, xf_idx, y0_idx, yf_idx)\n tile = {\n 'idx': tile_idx,\n 'left': x0,\n 'right': xf,\n 'top': y0,\n 'bottom': yf,\n 'y0_idx': y0_idx,\n 'yf_idx': yf_idx,\n 'x0_idx': x0_idx,\n 'xf_idx': xf_idx,\n 'new_filepath': new_filepath,\n 'loaded': False,\n 'row': row,\n 'col': col,\n 'x': col*file_info['tile_width'],\n 'y': row*file_info['tile_height'],\n 'width': tile_width,\n 'height': tile_height\n }\n if img_info['invert_y']:\n tile['top'] = yf\n tile['bottom'] = y0\n if img_info['invert_x']:\n tile['left'] = xf\n tile['right'] = x0\n new_tiles[tile_idx] = tile\n print('viewer:', img_info['viewer'])\n print('new tiles', new_tiles.keys())\n return all_tiles, new_tiles", "def tile(self, z, x, y_tms):\n logger.debug(_(\"Download tile %s\") % ((z, x, y_tms),))\n # Render each keyword in URL ({s}, {x}, {y}, {z}, {size} ... 
)\n size = self.tilesize\n s = self.tiles_subdomains[(x + y_tms) % len(self.tiles_subdomains)];\n y_osm = (2**int(z) - 1) - int(y_tms)\n try:\n url = self.tiles_url.format(**locals())\n except KeyError, e:\n raise DownloadError(_(\"Unknown keyword %s in URL\") % e)\n logger.debug(_(\"Retrieve tile at %s\") % url)\n r = DOWNLOAD_RETRIES\n sleeptime = 1\n while r > 0:\n try:\n request = urllib2.Request(url)\n for header, value in self.headers.items():\n request.add_header(header, value)\n stream = urllib2.urlopen(request)\n assert stream.getcode() == 200\n return stream.read()\n except (AssertionError, IOError), e:\n logger.debug(_(\"Download error, retry (%s left). (%s)\") % (r, e))\n r -= 1\n time.sleep(sleeptime)\n # progressivly sleep longer to wait for this tile\n if (sleeptime <= 10) and (r % 2 == 0):\n sleeptime += 1 # increase wait\n raise DownloadError(_(\"Cannot download URL %s\") % url)", "def get_tile(geojson, base_url):\n # open geojson and get tile index\n with open(geojson, 'r') as data:\n tile_geojson = json.load(data)\n features = tile_geojson[\"features\"]\n # get the tile index as x, y, z formats.\n xyz = [features[i]['properties']['tiles'] for i in range(len(features))]\n\n # create tile folder\n tiles_folder = op.splitext(geojson)[0]\n if not op.isdir(tiles_folder):\n makedirs(tiles_folder)\n\n # download and get the list of tiles\n tiles = list()\n for i in range(len(xyz)):\n x=str(xyz[i][0])\n y=str(xyz[i][1])\n z=str(xyz[i][2])\n url = base_url.replace('{x}', x).replace('{y}', y).replace('{z}', z)\n o = urlparse(url)\n _, image_format = op.splitext(o.path)\n tile_bn =\"{}-{}-{}{}\".format(z, x, y,image_format)\n r = requests.get(url)\n tile= op.join(tiles_folder, tile_bn)\n tiles.append(tile)\n with open(tile, 'wb')as w:\n w.write(r.content)\n return tiles", "def create_download_list(path, save_path):\n\n df = pd.read_csv(path)\n adjust_target_variable_labels(df)\n\n portions = []\n for target_label, df_dx1 in df.groupby('dx1'):\n\n if target_label not in LABEL_MAP.keys():\n continue\n\n portion = PORTION_MAP[target_label]\n msk = np.random.rand(len(df_dx1)) < portion\n portions.append(df_dx1[msk])\n\n selected_df = pd.concat(portions)\n selected_df = selected_df['MR ID']\n\n with open(save_path, 'w'):\n for selected in selected_df['MR ID']:\n pass\n\n # selected_df.to_csv(save_path, index=False)", "def populate_billboard_scrapables(self):\n results = MongoClient().billboard.spotify.find()\n self.df = pd.DataFrame(\n data=map(\n lambda r: (\n r[\"metadata\"][\"id\"],\n r[\"metadata\"][\"artists\"][0][\"name\"],\n r[\"metadata\"][\"name\"],\n ),\n results,\n ),\n columns=[\"track_id\", \"artist_name\", \"title\"],\n )\n print(f\"Tracks identified to scrape lyrics: {self.df.shape[0]}\")", "def extract_network(tracer_file):\n tracer_network = pd.DataFrame()\n iso_list = []\n\n for key in ['Z', 'A']:\n tracer_network[key] = np.array(tracer_file[key], dtype=int)\n\n for i in range(len(tracer_network)):\n row = tracer_network.loc[i]\n iso_str = network.get_isotope_str(z=row['Z'], a=row['A'])\n iso_list += [iso_str]\n\n tracer_network['isotope'] = iso_list\n return tracer_network[['isotope', 'Z', 'A']]", "async def get_tile_cache_preview(\n *, request: Request, dataset: str, version: str, implementation\n):\n\n tile_caches = get_dataset_tile_caches(dataset, version, implementation)\n sources = {\n \"carto-dark\": {\n \"type\": \"raster\",\n \"tiles\": [\n \"https://a.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n 
\"https://b.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n \"https://c.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n \"https://d.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n ],\n },\n }\n\n layers = [\n {\n \"id\": \"carto-dark-layer\",\n \"type\": \"raster\",\n \"source\": \"carto-dark\",\n \"minzoom\": 0,\n \"maxzoom\": 22,\n },\n ]\n for tile in tile_caches:\n if tile[\"asset_type\"] == \"Static vector tile cache\":\n try:\n style_specs = await get_static_vector_tile_cache_style_spec(tile)\n except ClientError:\n style_specs = get_default_style_spec(tile)\n else:\n style_specs = get_default_style_spec(tile)\n\n layers = [*layers, *style_specs[\"layers\"]]\n sources[dataset] = style_specs[\"sources\"][dataset]\n\n if len(layers) == 1:\n raise HTTPException(\n status_code=404, detail=\"No tile caches available for this dataset.\"\n )\n\n return templates.TemplateResponse(\n \"tile_preview.html\",\n context={\"sources\": sources, \"layers\": layers, \"request\": request},\n )", "def download_all_stocks():\n stocks = get_stocklist()\n dfs = {}\n for i, r in stocks.iterrows():\n start = time.time()\n s = r['Ticker']\n stockfile = '../stockdata/' + s + '.csv.gz'\n print('downloading', s)\n stock = quandl.get('EOD/' + s)\n stock.to_csv(stockfile, compression='gzip')\n dfs[s] = stock\n print('took', time.time() - start, 's')\n\n return dfs", "def prepare_dataset() -> Tuple[pd.DataFrame, Dict]:\n\n data_dir = Path.cwd()/\"freiburg_grocery_images\"\n labels = [directory.name for directory in data_dir.iterdir()]\n label_map = {label: i for i, label in enumerate(labels)}\n\n all_items = [str(file) for label in labels for file in (data_dir/label).iterdir()]\n labels_of_items = [label for label in labels for file in (data_dir/label).iterdir()]\n\n df = pd.DataFrame({\"Image\": all_items, \"Label\": labels_of_items})\n return df, label_map", "def findTiles(request, tree, removeHeadLinks=False, ignoreHeadTiles=False):\n \n tiles = []\n baseURL = request.getURL()\n\n # Find tiles in the head of the page\n if not ignoreHeadTiles or removeHeadLinks:\n for tileNode in headTileXPath(tree):\n tileHref = tileNode.get('href', None)\n\n if tileHref is not None:\n tileId = \"__tile_%s\" % uuid.uuid4()\n tileHref = urljoin(baseURL, tileHref)\n \n if removeHeadLinks:\n tileNode.getparent().remove(tileNode)\n tileNode = None\n \n if not ignoreHeadTiles:\n tiles.append((tileId, tileHref, tileNode,))\n\n # Find tiles in the body\n for tileNode in tree.getroot().cssselect(\".tile-placeholder\"):\n tileId = tileNode.get('id', None)\n tileHref = tileNode.get('data-tile-href', None)\n\n if tileHref is not None:\n \n # If we do not have an id, generate one\n if tileId is None:\n tileId = \"__tile_%s\" % uuid.uuid4()\n tileNode.attrib['id'] = tileId\n \n tileHref = urljoin(baseURL, tileHref)\n tiles.append((tileId, tileHref, tileNode,))\n\n return tiles", "def fetch_data(self) -> pd.DataFrame:\r\n os.chdir(r'\\\\192.168.8.90\\投研部\\Jessica\\test_data')\r\n if self.tic in ['RB.CCRI', 'HC.CCRI', 'I.CCRI', 'J.CCRI', 'JM.CCRI', 'ZC.CCRI']:\r\n f = pd.read_hdf('data.h5', 'snc')\r\n if self.tic in ['CU.CCRI', 'ZN.CCRI', 'AL.CCRI', 'NI.CCRI']:\r\n f = pd.read_hdf('data.h5', 'met')\r\n data = f.loc[f.loc[:, 'sec_code'] == self.tic, :]\r\n # extract I.CCRI data\r\n table = pd.pivot_table(data, index=['date'], columns=['factor_code'], values='factor_value')\r\n table = table.sort_values(by='date')\r\n \r\n return table", "def _download_data_from_nfs_connection(self) -> 'DataFrame':\n\n # note: as we need to load 
a data into the memory,\n # we are using pure requests and helpers from the WML client\n data_path = self.location.path\n connection_id = self.connection.asset_id\n\n return self._download_data_from_nfs_connection_using_id_and_path(connection_id, data_path)", "def download_dataset(dataset, destination):\n\n # Get images belonging to the requested dataset from cache\n cache_df = pd.read_csv(cache_file)\n df = cache_df.loc[cache_df['dataset.name'] == dataset]\n assert (df.shape[0] > 0), \"Dataset {0} does not exist\".format(dataset)\n\n # Create metadata for dataset that includes the file image paths\n print(\"Preprocessing metadata.\")\n files = []\n for _, row in df.iterrows():\n\n if type(row[\"meta.clinical.diagnosis\"]) == str:\n path = os.path.join(row[\"dataset.name\"], slugify(row[\"meta.clinical.diagnosis\"]))\n elif type(row[\"meta.clinical.diagnosis\"]) == str:\n path = os.path.join(row[\"dataset.name\"], slugify(row[\"meta.clinical.benign_malignant\"]))\n else:\n path = os.path.join(row[\"dataset.name\"], \"unknown\")\n\n files.append(os.path.join(path, \"{}.jpg\".format(row[\"_id\"])))\n df[\"file\"] = files\n df.to_csv(os.path.join(destination, \"{0}.csv\".format(dataset)), index=False)\n\n # Download images\n print(\"Downloading images from dataset: {}\".format(dataset))\n for _, row in tqdm(df.iterrows(), total=df.shape[0], desc=\"Downloading images\", unit=\"img\"):\n isic.download_image(row[\"_id\"], os.path.join(destination,row[\"file\"]))", "def get_shot_data(self, **kwargs: str) -> pd.DataFrame:\n data = self._get_data(query=\"shotsData\", **kwargs).T\n return data", "def glass_pandas(self):\n # pandas.set_option('display.width', 120)\n # TODO timeit (git_implementation) vs (my_implementation)\n # * df = pd.DataFrame(json.loads(r.text))\n # * df = df.set_index('t')\n # * df.index = pd.to_datetime(df.index, unit='s')\n # * df = df.sort_index()\n # * s = df.v\n # * s.name = '_'.join(url.split('/')[-2:])\n # * return s\n # for elem in self.loaded:\n # _metric, _data = elem[1]['_metrics'], elem[1]['_data']\n # try:\n # frame_keys = ['t'] + list(_data[0]['o'].keys())\n # framed = pandas.DataFrame(\n # data=[{k: (_data[iters]['t'] if k in 't' else _data[iters]['o'][k])\n # for k in frame_keys} for iters in range(len(_data))],\n # columns=frame_keys)\n # except KeyError:\n # framed = pandas.DataFrame(_data)\n # framed.set_index('t', inplace=True)\n # framed.index = pandas.to_datetime(\n # framed.index.to_flat_index(), unit='s', infer_datetime_format=True)\n # framed.sort_index(inplace=True)\n # framed.name = _metric\n # print(framed.name)\n # print(framed)", "def data_frame_creator(self):\n\n rgb_dir = [\n self.dataset_address + sequence_f + rgb_f\n for rgb_f in self.rgb_folder for sequence_f in self.sequence_folder\n ]\n rgb_data = [\n rgb_d + rgb for rgb_d in rgb_dir for rgb in os.listdir(rgb_d)\n ]\n\n depth_dir = [\n self.dataset_address + sequence_f + depth_f\n for depth_f in self.depth_folder\n for sequence_f in self.sequence_folder\n ]\n depth_data = [\n depth_d + depth for depth_d in depth_dir\n for depth in os.listdir(depth_d)\n ]\n\n segmentation_dir = [\n self.dataset_address + sequence_f + segmentation_f\n for segmentation_f in self.segmentation_folder\n for sequence_f in self.sequence_folder\n ]\n segmentation_data = [\n segmentation_d + segmentation\n for segmentation_d in segmentation_dir\n for segmentation in os.listdir(segmentation_d)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'DEPTH': depth_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n 
return pd.DataFrame(dataset).sample(frac=1)\n\n return pd.DataFrame(dataset)", "def download_potholes():\n\n\tlink = \"https://data.cityofchicago.org/api/views/7as2-ds3y/rows.csv?accessType=DOWNLOAD\"\n\tdf = pd.read_csv(link)\n\tdf = df[(df.STATUS == \"Open\") | (df.STATUS == \"Open - Dup\")]\n\tdf = df[[\"LATITUDE\", \"LONGITUDE\"]]\n\tdf = df.dropna(axis =0, subset=[\"LATITUDE\", \"LONGITUDE\"])\n\treturn df", "def create_overview_tiles(tile_job_info, output_folder, options):\n mem_driver = gdal.GetDriverByName('MEM')\n tile_driver = tile_job_info.tile_driver\n out_driver = gdal.GetDriverByName(tile_driver)\n\n tilebands = tile_job_info.nb_data_bands + 1\n\n # Usage of existing tiles: from 4 underlying tiles generate one as overview.\n\n tcount = 0\n for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):\n tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]\n tcount += (1 + abs(tmaxx-tminx)) * (1 + abs(tmaxy-tminy))\n\n ti = 0\n\n if tcount == 0:\n return\n\n if not options.quiet:\n print(\"Generating Overview Tiles:\")\n\n progress_bar = ProgressBar(tcount)\n progress_bar.start()\n\n for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):\n tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]\n for ty in range(tmaxy, tminy - 1, -1):\n for tx in range(tminx, tmaxx + 1):\n\n ti += 1\n ytile = GDAL2Tiles.getYtile(ty, tz, options)\n tilefilename = os.path.join(output_folder,\n str(tz),\n #str(tx),\n #\"%s.%s\" % (ytile, tile_job_info.tile_extension))\n '{0:04d}'.format(tx) + \"_\" + '{0:04d}'.format(ytile) + \".\" + tile_job_info.tile_extension)\n\n if options.verbose:\n print(ti, '/', tcount, tilefilename)\n\n if options.resume and os.path.exists(tilefilename):\n if options.verbose:\n print(\"Tile generation skipped because of --resume\")\n else:\n progress_bar.log_progress()\n continue\n\n # Create directories for the tile\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,\n 2 * tile_job_info.tile_size, tilebands)\n # TODO: fill the null value\n dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,\n tilebands)\n\n # TODO: Implement more clever walking on the tiles with cache functionality\n # probably walk should start with reading of four tiles from top left corner\n # Hilbert curve\n\n children = []\n # Read the tiles and write them to query window\n for y in range(2 * ty, 2 * ty + 2):\n for x in range(2 * tx, 2 * tx + 2):\n minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]\n if x >= minx and x <= maxx and y >= miny and y <= maxy:\n ytile2 = GDAL2Tiles.getYtile(y, tz+1, options)\n dsquerytile = gdal.Open(\n os.path.join(output_folder, str(tz + 1),\n '{0:04d}'.format(x) + \"_\" + '{0:04d}'.format(ytile2) + \".\" + tile_job_info.tile_extension),\n #str(x), \"%s.%s\" % (ytile2, tile_job_info.tile_extension)),\n gdal.GA_ReadOnly)\n if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):\n tileposy = 0\n else:\n tileposy = tile_job_info.tile_size\n if tx:\n tileposx = x % (2 * tx) * tile_job_info.tile_size\n elif tx == 0 and x == 1:\n tileposx = tile_job_info.tile_size\n else:\n tileposx = 0\n dsquery.WriteRaster(\n tileposx, tileposy, tile_job_info.tile_size,\n tile_job_info.tile_size,\n dsquerytile.ReadRaster(0, 0,\n tile_job_info.tile_size,\n tile_job_info.tile_size),\n band_list=list(range(1, tilebands + 1)))\n children.append([x, y, tz + 1])\n\n scale_query_to_tile(dsquery, dstile, tile_driver, 
options,\n tilefilename=tilefilename)\n # Write a copy of tile to png/jpg\n if options.resampling != 'antialias':\n # Write a copy of tile to png/jpg\n out_driver.CreateCopy(tilefilename, dstile, strict=0)\n\n del dstile\n\n options.generatedFiles.append(tilefilename)\n # applyLegend(tilefilename, options.legendObj)\n\n if options.verbose:\n print(\"\\tbuild from zoom\", tz + 1,\n \" tiles:\", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),\n (2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))\n\n # # Create a KML file for this tile.\n # if tile_job_info.kml:\n # with open(os.path.join(\n # output_folder,\n # '%d/%d/%d.kml' % (tz, tx, ty)\n # ), 'wb') as f:\n # f.write(generate_kml(\n # tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,\n # get_tile_swne(tile_job_info, options), options, children\n # ).encode('utf-8'))\n\n if not options.verbose and not options.quiet:\n progress_bar.log_progress()", "def download_data(study, engine, has_trips=True, filter_coverage=False):\n\n def to_datetime(df):\n df[\"started_at\"] = pd.to_datetime(df[\"started_at\"], utc=True)\n df[\"finished_at\"] = pd.to_datetime(df[\"finished_at\"], utc=True)\n return df\n\n exclude_purpose_tist = [\n \"Light Rail\",\n \"Subway\",\n \"Platform\",\n \"Trail\",\n \"Road\",\n \"Train\",\n \"Bus Line\",\n ]\n\n print(\"\\t download staypoints\")\n sp = gpd.read_postgis(\n sql=\"select * from {}.{}\".format(study, sp_name_dict[study]),\n con=engine,\n geom_col=\"geom\",\n index_col=\"id\",\n )\n sp = to_datetime(sp)\n\n print(\"\\t download locations\")\n sql = f\"SELECT * FROM {study}.{locs_name_dict[study]}\"\n locs = ti.io.read_locations_postgis(sql, con=engine, index_col=\"id\")\n\n # studies with trips\n gap_treshold = 12\n if has_trips:\n print(\"\\t download trips\")\n trips = ti.io.read_trips_postgis(\n f\"select * from {study}.trips\", con=engine\n )\n if filter_coverage:\n # get_triplegs(study=study, engine=engine)\n print(\"\\t download triplegs\")\n tpls = pd.read_sql(\n sql=f\"select id, user_id, started_at, finished_at from {study}.triplegs\",\n con=engine,\n index_col=\"id\",\n )\n tpls = to_datetime(tpls)\n sp, locs, trips = filter_tracking_coverage(sp, locs, tpls, trips)\n\n # studies without trips (Foursquare)\n else:\n sp = sp[~sp[\"purpose\"].isin(exclude_purpose_tist)]\n trips = None\n return (sp, locs, trips, gap_treshold)", "def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. 
HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()", "def __get_files(self):\r\n \r\n files = []\r\n with requests.Session() as s:\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}\r\n respons = s.get(self.__url, headers=headers).text\r\n soup = BeautifulSoup(respons, 'html.parser')\r\n data_files = [link.get('href') for link in soup.find_all('a', class_=\"btn-primary\")]\r\n for year in soup.find_all('td', class_=\"align-middle\"):\r\n regex = re.compile(r\"data/data-?gis({year}|\\-rok\\-{year})\\.zip\".format(year=year.text))\r\n if any((match := regex.match(link)) for link in data_files):\r\n files.append(match.group(0))\r\n else:\r\n files.append(data_files[-1])\r\n return files", "def get_data():\n \n \"\"\" Prepare variables\"\"\"\n urls = {\"cases\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv\",\n \"deaths\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv\",\n \"recovered\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv\"}\n\n localnames = {\"cases\": \"Cases.csv\",\n \"deaths\": \"Deaths.csv\",\n \"recovered\": \"Recovered.csv\"}\n\n dfs = {\"cases\": None,\n \"deaths\": None,\n \"recovered\": None}\n\n \"\"\" Download\"\"\"\n for key in urls.keys():\n url = urls[key]\n localname = localnames[key]\n urllib.request.urlretrieve(url, localname)\n\n \"\"\" Load variables\"\"\"\n for key in dfs.keys():\n dfs[key] = pd.read_csv(localnames[key])\n \n \"\"\" Return\"\"\"\n return(dfs)", "def aggregate_timetable_details(logger):\n df_array = []\n for page in range(0, MAX_PAGE+1): # MAX_PAGE inclusive\n filename = dirname + 'TimeTablePage_{}'.format(page) + '.csv'\n if os.exists(filename):\n # When database is ready, this info can be recorded there\n logger.info('File[{file}] already downloaded. 
Reading it'.format(file=filename))\n url = filename\n else:\n count = page * 20\n url = TIMETABLE_URL + '&count={count}&page={page}'.format(count=count, page=page)\n logger.info('Fetch TimeTableDetails from URL[{url}] page[{page}] into {directory}'.format(url=url, page=page, directory=dirname))\n \n try:\n df = pd.read_html(url)[1] # the table of interest from the list\n except Exception as e:\n logger.error('Exception when reading HTML:[{exception}]'.format(exception=e))\n exit(0)\n\n logger.info('Writing to [{file}]'.format(file=filename))\n df.to_csv(filename, index=False)\n df_array.append(df)\n\n df = pd.concat(df_array)\n print(df.head())\n print(df.tail())\n\n return 0", "def download(self, verbose):\n\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from COVID-19 Open Data by Google Cloud Platform https://github.com/GoogleCloudPlatform/covid-19-open-data\")\n # Index\n i_cols = [\"location_key\", \"country_name\", \"subregion1_name\", \"subregion2_name\", \"iso_3166_1_alpha_3\"]\n i_df = pd.read_csv(self.URL_I, usecols=i_cols)\n # Mobility\n m_df = pd.read_csv(self.URL_M)\n m_df = (m_df.set_index([\"date\", \"location_key\"]) + 100).reset_index()\n # Combine data\n df = m_df.merge(i_df, how=\"left\", on=\"location_key\")\n # Location (country/province)\n df = df.loc[df[\"subregion2_name\"].isna()]\n df[self.PROVINCE] = df[\"subregion1_name\"].fillna(self.UNKNOWN).apply(unidecode)\n df[\"country_name\"] = df[\"country_name\"].replace(\n {\n # CIV\n \"Ivory Coast\": \"Cote d'Ivoire\",\n }\n )\n return df", "def write_overview_tile(self, tx, ty, tz,tms_osm):\n\n image_format = self.get_overview_tile_format(tx, ty, tz)\n\n if image_format is None:\n return\n else:\n num_bands = self.get_num_bands(image_format)\n\n dsquery = self.mem_drv.Create('', 2*self.tile_size, 2*self.tile_size, num_bands)\n self.fill_init_dest(dsquery)\n # tms: z=19: 281626\n # -z=18-140813 176168*2=352336; 176168*2+1=352337\n # -- 352336,352337\n y_from=2*ty\n y_to=2*ty + 1\n ty_tms=ty;\n s_y_type=\"tms\"\n if tms_osm:\n # osm: z=19: 281626\n # -z=18-140813 85975*2+1=171951; 85975*2=171950\n # -- 171951,171950 [in range: last/end not used]\n y_from=2*ty + 1\n y_to=2*ty\n ty_tms=(2**tz-1) - ty\n s_y_type=\"osm\"\n s_tile_id=\"{0}-{1}-{2}.{3}\".format(str(tz), str(tx),str(ty),s_y_type)\n if self.verbose:\n # Build from zoom 19 tiles: (281626, 171951) (281627, 171951) (281626, 171950) (281627, 171950)\n print \"\\tBuild [\",s_tile_id,\"] from [\",self.output_dir,\"] zoom\", tz+1,\" tiles [\",s_y_type,\"]: \", (2*tx, y_from), (2*tx+1, y_from),(2*tx, y_to), (2*tx+1, y_to)\n\n for cx, cy, child_image_format in self.iter_children(tx, ty, tz):\n if (ty_tms==0 and cy==1) or (ty_tms!=0 and (cy % (y_from)) != 0):\n tileposy = 0\n else:\n tileposy = self.tile_size\n if tx:\n tileposx = cx % (2*tx) * self.tile_size\n elif tx==0 and cx==1:\n tileposx = self.tile_size\n else:\n tileposx = 0\n\n path = self.get_full_path(cx, cy, tz+1, format_extension[child_image_format])\n\n dsquerytile = gdal.Open(path, gdal.GA_ReadOnly)\n\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n dsquerytile.ReadRaster(0, 0, self.tile_size, self.tile_size),\n band_list=range(1, dsquerytile.RasterCount+1))\n\n if image_format == \"PNG\" and dsquerytile.RasterCount != num_bands:\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n self.get_alpha_filler(), band_list=[num_bands])\n\n dstile = self.mem_drv.Create('', self.tile_size, self.tile_size, num_bands)\n path = self.get_full_path(tx, 
ty, tz, format_extension[image_format])\n self.resampler(path, dsquery, dstile, image_format)", "def download_postcode_areas() -> pd.DataFrame:\n\n uk_cities_postcodes = \"https://en.wikipedia.org/wiki/List_of_postcode_areas_in_the_United_Kingdom\"\n\n postcodes_tables = pd.read_html(uk_cities_postcodes)\n postcode_table = postcodes_tables[0]\n\n print(\"Saving the postcodes....\")\n output_path = path.join(\"../..\", \"datasets\", \"uk_postcodes\", f\"postcodes.csv\")\n postcode_table.to_csv(output_path)\n print(\"Saving the postcodes....DONE\")\n return postcode_table", "def get_weather_data(lat='40.761440',lng='-73.981806'):\r\n key ='********************************'\r\n x = pd.DataFrame()\r\n unix_now = int((dt.datetime.now()- dt.datetime(1970,1,1)).total_seconds())\r\n for time in range(unix_now-86400, unix_now+604800, 86400):\r\n rsp = rq.get('https://api.darksky.net/forecast/{}/{},{},{}'.format(key, lat, lng, time))\r\n rsp_json = json.loads(rsp.text)\r\n row = json_normalize(rsp_json[\"daily\"]['data'])\r\n x = x.append(row)\r\n \r\n x = x[['icon','apparentTemperatureHigh','apparentTemperatureLow','cloudCover','humidity','precipProbability',\r\n 'pressure','visibility','windBearing','windGust','windSpeed']].reset_index(drop=True)\r\n return x", "def export_data(self):\n return self.export_all_data()", "def download_brick_catalog(brick):\n urls = {1: 'http://archive.stsci.edu/pub/hlsp/phat/brick01/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12058-m31-b01_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 2: 'http://archive.stsci.edu/pub/hlsp/phat/brick02/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12073-m31-b02_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 3: 'http://archive.stsci.edu/pub/hlsp/phat/brick03/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12109-m31-b03_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 4: 'http://archive.stsci.edu/pub/hlsp/phat/brick04/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12107-m31-b04_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 5: 'http://archive.stsci.edu/pub/hlsp/phat/brick05/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12074-m31-b05_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 6: 'http://archive.stsci.edu/pub/hlsp/phat/brick06/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12105-m31-b06_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 7: 'http://archive.stsci.edu/pub/hlsp/phat/brick07/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12113-m31-b07_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 8: 'http://archive.stsci.edu/pub/hlsp/phat/brick08/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12075-m31-b08_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 9: 'http://archive.stsci.edu/pub/hlsp/phat/brick09/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12057-m31-b09_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 10: 'http://archive.stsci.edu/pub/hlsp/phat/brick10/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12111-m31-b10_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 11: 'http://archive.stsci.edu/pub/hlsp/phat/brick11/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12115-m31-b11_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 12: 'http://archive.stsci.edu/pub/hlsp/phat/brick12/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12071-m31-b12_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 13: 'http://archive.stsci.edu/pub/hlsp/phat/brick13/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12114-m31-b13_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 14: 
'http://archive.stsci.edu/pub/hlsp/phat/brick14/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12072-m31-b14_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 15: 'http://archive.stsci.edu/pub/hlsp/phat/brick15/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12056-m31-b15_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 16: 'http://archive.stsci.edu/pub/hlsp/phat/brick16/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12106-m31-b16_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 17: 'http://archive.stsci.edu/pub/hlsp/phat/brick17/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12059-m31-b17_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 18: 'http://archive.stsci.edu/pub/hlsp/phat/brick18/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12108-m31-b18_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 19: 'http://archive.stsci.edu/pub/hlsp/phat/brick19/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12110-m31-b19_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 20: 'http://archive.stsci.edu/pub/hlsp/phat/brick20/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12112-m31-b20_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 21: 'http://archive.stsci.edu/pub/hlsp/phat/brick21/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12055-m31-b21_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 22: 'http://archive.stsci.edu/pub/hlsp/phat/brick22/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12076-m31-b22_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 23: 'http://archive.stsci.edu/pub/hlsp/phat/brick23/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12070-m31-b23_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits'} # NOQA\n url = urls[brick]\n output_path = os.path.join(os.getenv('PHATV2DATA'), os.path.basename(url))\n print \"Downloading {url}\".format(url=url)\n cmd = 'wget -c -nc -q -O {output} {input}'.format(output=output_path,\n input=url)\n print \"Started at\", datetime.utcnow()\n if not os.path.exists(output_path):\n subprocess.call(cmd, shell=True)\n print \"Ended at \", datetime.utcnow()", "def get_tiles(self) -> list:\n n_rows = self.mosaic_dimensions[0]\n n_columns = self.mosaic_dimensions[1]\n return [\n self.get_tile(i_row, i_column)\n for i_row in range(n_rows)\n for i_column in range(n_columns)\n ]", "def list_tiles_covering_land(self):\n\n land_tiles = Equi7Grid._static_data[self.core.tag][\"coverland\"][\n self.core.tiletype]\n return list(land_tiles)", "def get_dfs(npages=927):\n print(\"loading data\")\n try:\n os.makedirs('./data')\n except FileExistsError:\n pass\n\n def fp(pagenum): return './data/%s.csv' % pagenum\n\n dfs = (c(\n pagenum,\n get_page,\n parse_html_table(pagenum, fp(pagenum)),\n ) if not exists(fp(pagenum)) else pd.read_csv(fp(pagenum))\n for pagenum in range(1, npages)\n )\n\n df = pd.concat(dfs)\n return df", "def getTile(self, lat, lon):\r\n if self.childFileListDownload is not None and self.childFileListDownload.is_alive():\r\n '''print \"Getting file list\"'''\r\n return 0\r\n elif not self.filelist:\r\n '''print \"Filelist download complete, loading data\"'''\r\n data = open(self.filelist_file, 'rb')\r\n self.filelist = pickle.load(data)\r\n\r\n try:\r\n continent, filename = self.filelist[(int(lat), int(lon))]\r\n except KeyError:\r\n '''print \"here??\"'''\r\n return 0\r\n\r\n if not os.path.exists(os.path.join(self.cachedir, filename)):\r\n if self.childTileDownload is None or not self.childTileDownload.is_alive():\r\n self.childTileDownload = multiprocessing.Process(target=self.downloadTile, args=(continent, filename))\r\n 
self.childTileDownload.start()\r\n '''print \"Getting Tile\"'''\r\n return 0\r\n elif self.childTileDownload is not None and self.childTileDownload.is_alive():\r\n '''print \"Still Getting Tile\"'''\r\n return 0\r\n # TODO: Currently we create a new tile object each time.\r\n # Caching is required for improved performance.\r\n try:\r\n return SRTMTile(os.path.join(self.cachedir, filename), int(lat), int(lon))\r\n except InvalidTileError:\r\n return 0", "def init_file_dataframe():\n \n row_names_link = [\"NA\"] * MAX_NUM_OF_FILES\n row_names_name = [\"NA\"] * MAX_NUM_OF_FILES\n row_names_down = [\"NA\"] * MAX_NUM_OF_FILES\n \n for i in range(MAX_NUM_OF_FILES):\n row_names_link[i] = \"link_\" + str(i + 1)\n row_names_name[i] = \"name_\" + str(i + 1)\n row_names_down[i] = \"down_\" + str(i + 1)\n \n df = pd.DataFrame(columns = row_names_link + row_names_name + row_names_down)\n \n return df, row_names_link, row_names_name, row_names_down", "def __extract_info(self) -> Results:\n results: Results = []\n\n response = request(self.home_url)\n\n html = bs(response, \"lxml\")\n table = html.find(\"table\")\n for row in table.find_all(\"tr\")[1:]:\n col1, col2, col3 = row.find_all(\"td\")\n filename1, perc1 = col1.text.strip().split()\n filename2, perc2 = col2.text.strip().split()\n\n with ThreadPoolExecutor() as executor:\n future = executor.submit(self.__get_line_numbers, col1.a.get(\"href\"))\n lines = future.result()\n\n result_dict = Result(\n file1=filename1,\n file2=filename2,\n percentage_file1=perc_str_to_int(perc1),\n percentage_file2=perc_str_to_int(perc2),\n no_of_lines_matched=int(col3.text.strip()),\n lines_matched=lines,\n )\n results.append(result_dict)\n return results", "def download(df_shorter,folderName):\n os.mkdir(str(folderName))\n path = os.getcwd()+'\\\\'+str(folderName)+'\\\\'\n #add column with video link generated from IDs\n df_shorter['urls'] = df_shorter['id'].apply(lambda x: generateLinkFromId(x))\n vid_dl = []\n i = 1\n for url in df_shorter['urls']:\n if url != False:\n name = str(i)+'.mp4'\n vid_dl.append(wget.download(url,path+name))#retrun the path of the saved video\n i = i+1\n return vid_dl", "def _download_to_df(url, table_name, year, month):\n # Insert the table_name, year and month into the url.\n url = url.format(table=table_name, year=year, month=str(month).zfill(2))\n # Download the file.\n r = requests.get(url)\n if r.status_code != 200:\n raise _MissingData((\"\"\"Requested data for table: {}, year: {}, month: {} \n not downloaded. Please check your internet connection. 
Also check\n http://nemweb.com.au/#mms-data-model, to see if your requested\n data is uploaded.\"\"\").format(table_name, year, month))\n # Convert the contents of the response into a zipfile object.\n zf = zipfile.ZipFile(io.BytesIO(r.content))\n # Get the name of the file inside the zip object, assuming only one file is zipped inside.\n file_name = zf.namelist()[0]\n # Read the file into a DataFrame.\n data = pd.read_csv(zf.open(file_name), skiprows=1)\n # Discard last row of DataFrame\n data = data[:-1]\n return data", "def download_extract_zip(url):\n response = requests.get(url)\n with ZipFile(BytesIO(response.content)) as thezip:\n for zipinfo in thezip.infolist():\n with thezip.open(zipinfo) as thefile:\n df = pd.read_csv(thefile)\n return (df)", "def test_fetch_traffic(self):\n assert isinstance(_tabular.fetch_traffic_data(), \n pd.DataFrame)", "def get_flagged_tile_list ( self ) :\n tile_list = []\n stmt = \"select name from sdb_product where sys003 =\\'T\\'\"\n self.oracle_cursor.arraysize = 100000\n self.oracle_cursor.execute(stmt)\n resultset = self.oracle_cursor.fetchmany()\n if resultset :\n for row in resultset :\n tile_list.append(str(row[0]))\n return tile_list", "def _extract_all(sel,\n rootpath=r'D:\\WorkStation_2018\\WorkStation_dynamicFC\\Workstation_dynamic_fc_baobaoComputer\\Data\\Dynamic',\n whichstate='state1'):\n\n # hc\n group_name = 'HC'\n data_hc = sel._extract_one(rootpath, whichstate, group_name)\n\n # mdd\n group_name = 'MDD'\n data_mdd = sel._extract_one(rootpath, whichstate, group_name)\n\n # bd\n group_name = 'BD'\n data_bd = sel._extract_one(rootpath, whichstate, group_name)\n\n # sz\n group_name = 'SZ'\n data_sz = sel._extract_one(rootpath, whichstate, group_name)\n\n # concat\n data_all = pd.concat([data_hc, data_mdd, data_bd, data_sz], axis=0)\n data_all.index = np.arange(0, np.shape(data_all)[0])\n\n return data_all", "def extract_work_info(self, data_items):\n result = []\n count = 0\n for data_item in data_items:\n keep = True\n if self.filters.get('min') != None and data_item['bookmarkCount'] < self.filters['min']:\n keep = False\n if self.filters.get('max') != None and data_item['bookmarkCount'] > self.filters['max']:\n keep = False\n if self.filters['multi'] == False and data_item['pageCount'] > 1:\n keep = False\n if keep:\n url = data_item['url']\n begin = url.find('img/')\n end = url.find('_master')\n url_info = url[begin + 4:end - 3] # no real source here since there might be multi images\n\n result.append({\n 'id': data_item['illustId'],\n 'name': data_item['illustTitle'], # filename\n 'username': data_item['userName'], # filename\n 'url_info': url_info, # for fetching real source\n 'count': data_item['pageCount'], # for fetching multiple images\n 'type': data_item['illustType'] # for determining picture/ugoira\n })\n count += data_item['pageCount']\n return result, count", "def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = img_obj.crop((x,y,x+tile_w,y+tile_h))\n 
tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles", "def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n logging.info(\"Fetch housing data.....\")\n os.makedirs(housing_path, exist_ok=True)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()", "def ferry_data_download(URL):\n explanation = 'File exists'\n file_downloaded = True\n # Request if the thredds server is working, add .html to URL\n req = requests.get(URL + '.html')\n if req.status_code == 200:\n \"\"\"File exists and is good for download, so write file\"\"\"\n print('File is ok')\n explanation = 'Good URL, File downloaded'\n file_downloaded = True\n ferry = xr.open_dataset(URL)\n else:\n print('File not found or unavailable')\n explanation = ' File not found or unavailable'\n file_downloaded = False\n ferry = np.nan\n return (ferry, file_downloaded, explanation)", "def getIndicePreciosInternosAlPorMayorBase2015(self):\n #Obtener la url de descarga del cvs\n urlPackage=\"https://datos.gob.ar/api/3/action/package_show?id=sspm-indice-precios-internos-basicos-al-por-mayor-dic-2015-100\"\n s=requests.get(urlPackage).content\n objJson = json.loads(s)\n resultado = objJson['result']['resources']\n selector = 0\n ultimoResultado = resultado[selector]\n urlDescarga = ultimoResultado['url']\n descripcion = ultimoResultado['description']\n print(\"Descargando: {}\".format(descripcion))\n print(\"Archivo: {}\".format(urlDescarga))\n \n #Descargar la url con cvs y generar pandas dataframe\n contenidoCVS = requests.get(urlDescarga).content\n flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))\n df_temp = pd.read_csv(flujoCVS)\n \n #transform string to datetime\n df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')\n df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date\n #set index\n df_temp.set_index('indice_tiempo', inplace=True)\n \n return df_temp", "def extract_imits(spark_session: SparkSession) -> DataFrame:\n print(spark_session)", "def to_dataframe(self, **kwargs):\n if not self.fetcher:\n raise InvalidFetcher(\n \" Initialize an access point (%s) first.\"\n % \",\".join(self.Fetchers.keys())\n )\n return self.load().data.to_dataframe(**kwargs)", "def download_tile(map_layer, zoom, x, y):\n try:\n tile_url = map_layer.get_tile_url(zoom, x, y)\n tmp_file, headers = urllib.request.urlretrieve(tile_url)\n return (x, y), tmp_file\n except URLError as e:\n app.logger.info(\"Error downloading tile x={}, y={}, z={} for layer {}: {}\".format(\n x, y, zoom, map_layer, e.reason))\n return (x, y), pkg_resources.resource_filename(\"geos\", \"static/empty_tile.png\")", "def PIL_dataframe(self, ropi_map_list):\n ropi_df = []\n ropi_labels = []\n\n for i, file in enumerate(ropi_map_list):\n if (np.all(file == False)):\n ropi_df.append(None)\n ropi_labels.append(None)\n else:\n ob_labels = label(file, connectivity=2)\n ropi_df.append(self.get_prop_labels(file))\n ropi_labels.append(ob_labels)\n\n return ropi_df, ropi_labels", "def extract_ipo_data_to_dataframe(output_path: str):\n ipo_guiden_url = \"https://www.affarsvarlden.se/ipo-guiden/screener\"\n driver = web_scraper.get_chrome_driver_for_url(url=ipo_guiden_url)\n # Removes pop up\n import time\n time.sleep(2)\n web_scraper.remove_ad(chrome_driver=driver)\n # Scroll to get rid 
of dynamic ad at top of webpage\n driver.execute_script(f\"window.scrollTo(0, {1080})\")\n web_scraper.toggle_all_keys_to_true(chrome_driver=driver, is_default_layout=True)\n web_scraper.show_all_ipos(chrome_driver=driver)\n table = web_scraper.get_ipo_table(driver=driver, id=\"datatable_overview\")\n header_labels = web_scraper.get_ipo_headers(table=table)\n flag_column_ind = header_labels.index(dfk.N_FLAGS)\n flag_dict = web_scraper.get_flag_dict(chrome_driver=driver, table=table, flag_column_ind=flag_column_ind)\n df_dict = web_scraper.extract_table_body_to_dict(table=table,\n header_labels=header_labels)\n df_dict.update(flag_dict)\n dataframe = pd.DataFrame(data=df_dict)\n print(\"Sneak peek of the goodies:\")\n print(dataframe)\n print(f\"Saving Dataframe to {output_path}\")\n dataframe.to_csv(output_path)", "def extract_wepages(self, webpage_dir: str) -> pd.DataFrame:\n webpage_results = defaultdict(list)\n\n for fn in glob.iglob(os.path.join(webpage_dir, \"*.html\")):\n indx = int(os.path.basename(fn).replace(\".html\", \"\"))\n res = self.parse_webpage(fn)\n webpage_results['index'].append(indx)\n for rn, rv in res.items():\n webpage_results[rn].append(rv)\n\n return pd.DataFrame.from_dict(webpage_results)", "def download_filings(self):\n filings = []\n\n with IndexDb.get_session() as session:\n q = session.query(Filing, Company)\\\n .filter(Company.cik == Filing.cik_trust)\\\n .order_by(Filing.date_filing, Filing.acc_no)\n\n # Exit if index is empty\n if q.count() == 0:\n session.close()\n print(\"Index is empty! Please rebuild.\")\n sys.exit(1)\n\n # Apply document limit\n if self.n_limit:\n q = q[:self.n_limit]\n\n # Only leave filings that have not been downloaded\n if self.rebuild:\n print(f\"{ats()} Updating index...\")\n for row in q.all():\n row.Filing.is_downloaded=False\n print(f'{ats()} Done!')\n else:\n q = q.filter(Filing.id_downloaded == False)\n\n # Filter by user-defined asset type\n q = q.filter(Company.asset_type.in_(self.asset_types))\n filings = q.all()\n\n # Prepare storage\n if self.use_s3:\n # Use S3\n filings_path = os.path.dirname(__file__)\n s3_client = boto3.client('s3')\n bucket_name = defaults['s3_bucket']\n s3_resource = boto3.resource('s3')\n # Delete all folders in the bucket\n if self.rebuild:\n bucket_obj = s3_resource.Bucket(bucket_name)\n bucket_obj.objects.all().delete()\n else:\n # Use local storage\n filings_path = os.path.join(os.path.dirname(__file__), defaults['filings_folder'])\n # Remove folder if downloading from scratch\n if os.path.exists(filings_path) and self.rebuild:\n shutil.rmtree(filings_path)\n # Create folder if not exists\n if not os.path.exists(filings_path):\n os.mkdir(filings_path)\n\n # Iterate through entries on the index\n doc_counter = 0\n for row in filings: # row contains two objects: Filing and Company\n # Build filename\n xml_name = row.Filing.url.split(\"/\")[-1] # Original filename from filing\n filename = \"_\".join([row.Filing.date_filing.strftime(\"%Y-%m-%d\"), str(row.Filing.acc_no), xml_name])\n # Build filepath\n if self.use_s3:\n # Use project folder for temporary storage\n subfolder_path = filings_path\n else:\n # Create folder tree\n asset_path = os.path.join(filings_path, row.Company.asset_type)\n if not os.path.exists(asset_path):\n os.mkdir(asset_path)\n subfolder_path = os.path.join(asset_path, row.Company.name)\n # Create subfolder if not exists\n if not os.path.exists(subfolder_path):\n os.mkdir(subfolder_path)\n\n # Download file\n download_path = os.path.join(subfolder_path, filename)\n 
print(\"-\"*5)\n print(f\"{ats()} Downloading document {row.Filing.url} ...\")\n failed_counter = 0\n downloaded = False\n try:\n downloaded = FileDownloader.download(row.Filing.url, download_path)\n except:\n print(f\"{ats()} Download failed for document {row.Filing.url} Skipping...\")\n failed_counter += 1\n if failed_counter == 5:\n print(f\"{ats()} Failed downloading several documents. Aborting...\")\n sys.exit(1)\n\n if downloaded:\n print(f\"{ats()} Downloaded successfully!\")\n # Upload to s3\n if self.use_s3 and downloaded:\n s3_path_components = [row.Company.asset_type, row.Company.name, filename]\n s3_path = \"/\".join(s3_path_components)\n try:\n # Check if file exists on s3\n s3_resource.Object(bucket_name, s3_path).load()\n except:\n print(f\"{ats()} Uploading to s3...\")\n s3_client.upload_file(download_path, bucket_name, s3_path)\n print(f'{ats()} Uploaded document {s3_path}')\n os.remove(download_path)\n # Update index\n with IndexDb.get_session() as session:\n f = session.query(Filing).get(row.Filing.acc_no)\n if f is not None:\n f.is_downloaded = True\n # f.update()\n doc_counter += 1\n else:\n print(f\"{ats()} Could not download url: {row.Filing.url}\")\n\n if self.use_s3:\n print(f'{ats()} Finished. Downloaded and uploaded to s3 {doc_counter} documents.')\n else:\n print(f'{ats()} Finished. Downloaded {doc_counter} documents.')", "def download_files(self):", "def trip_report_builder(df):\n count = 0\n for row in range(len(df)):\n if df['numReports'][row]:\n title = df['hike_name'][row]\n url = df['url'][row]\n iterate_all_reports(title, url)\n save_trail_html(title, url)\n count += 1\n print(f'Unique Trails {count}')\n else:\n continue\n return None", "def get_mta_data2():\n\n url = \"http://web.mta.info/developers/data/nyct/turnstile/turnstile_{}.txt\"\n dfs = []\n\n for week_num in week_nums:\n file_url = url.format(week_num)\n dfs.append(\n pd.read_csv(\n file_url, parse_dates=[[\"DATE\", \"TIME\"]], keep_date_col=True\n )\n )\n return pd.concat(dfs)", "def to_dataframe(self, **kwargs):\n if not self.fetcher:\n raise InvalidFetcher(\n \" Initialize an access point (%s) first.\"\n % \",\".join(self.Fetchers.keys())\n )\n return self.fetcher.to_dataframe(**kwargs)", "def populated_archivist_dataset(archivist_dataset, tmp_path_factory):\n wpath = tmp_path_factory.mktemp(\"archivistds\")\n\n ads = archivist_dataset\n\n dscontent = (\n ('azip/file1.txt', 'zipfile1'),\n ('azip/file2.csv', 'zipfile2_muchcontent'),\n ('atar/file1.txt', 'tarfile1'),\n ('atar/file2.csv', 'tarfile2_muchcontent'),\n )\n srcds = Dataset(wpath / 'srcds').create(**nonoise)\n for fpath, fcontent in dscontent:\n fpath = srcds.pathobj / (PurePosixPath(fpath))\n fpath.parent.mkdir(parents=True, exist_ok=True)\n fpath.write_text(fcontent)\n srcds.save(**nonoise)\n\n archive_root = wpath / 'myarchive'\n #archivetype = 'zip'\n\n akeys = {}\n\n # no ZIP just yet\n # for archivetype, ext in (('zip', ''), ('tar', '.gz')):\n for archivetype, ext in (('tar', '.gz'), ):\n archive_path = Path(f\"{archive_root}.{archivetype}{ext}\")\n\n archive_path_inds = ads.pathobj / '.archives' / archive_path.name\n # create an archive, the easy way, by simply exporting the\n # entire dataset worktree\n srcds.export_archive(archive_root, archivetype=archivetype,\n **nonoise)\n assert archive_path.exists()\n\n # add the archive (in a hidden dir) to be able to reference\n # it via a key\n aurl = archive_path.as_uri()\n ads.repo.call_annex([\n 'addurl', '--file', str(archive_path_inds), aurl])\n ads.save(**nonoise)\n # get the 
key of the archive\n akeys[archivetype] = ads.status(\n archive_path_inds, annex='basic', return_type='item-or-list',\n **nonoise)['key']\n return ads, akeys, archive_root, dscontent", "def to_data_frame(self, all_entries):\n\n c_attr = ['ref_id', 'title', 'authors', 'publication', 'volume', 'issue',\n 'series', 'date', 'pages', 'volume', 'scopus_link', 'doi', 'pii',\n 'pdf_link', 'scopus_cite_count']\n all_data = []\n for entry in all_entries:\n all_data.append([getattr(entry, x) for x in c_attr])\n\n return pd.DataFrame(all_data, columns=c_attr)", "def to_xyz_tiles(\n self, root: str, tile_size: int, zoom_levels: list, driver=\"GTiff\", **kwargs\n ):\n mName = os.path.normpath(os.path.basename(root))\n\n def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n def tile_window(shape, px):\n \"\"\"Yield (left, upper, width, height).\"\"\"\n nr, nc = shape\n lu = product(range(0, nc, px), range(0, nr, px))\n\n ## create the window\n for l, u in lu:\n h = min(px, nr - u)\n w = min(px, nc - l)\n yield (l, u, w, h)\n\n vrt_fn = None\n prev = 0\n nodata = self.nodata\n obj = self._obj.copy()\n zls = {}\n for zl in zoom_levels:\n diff = zl - prev\n pxzl = tile_size * (2 ** (diff))\n\n # read data from previous zoomlevel\n if vrt_fn is not None:\n obj = xr.open_dataarray(vrt_fn, engine=\"rasterio\").squeeze(\n \"band\", drop=True\n )\n x_dim, y_dim = obj.raster.x_dim, obj.raster.y_dim\n obj = obj.chunk({x_dim: pxzl, y_dim: pxzl})\n dst_res = abs(obj.raster.res[-1]) * (2 ** (diff))\n\n if pxzl > min(obj.shape):\n logger.warning(\n f\"Tiles at zoomlevel {zl} smaller than tile_size {tile_size}\"\n )\n\n # Write the raster paths to a text file\n sd = join(root, f\"{zl}\")\n create_folder(sd)\n txt_path = join(sd, \"filelist.txt\")\n file = open(txt_path, \"w\")\n\n for l, u, w, h in tile_window(obj.shape, pxzl):\n col = int(np.ceil(l / pxzl))\n row = int(np.ceil(u / pxzl))\n ssd = join(sd, f\"{col}\")\n\n create_folder(ssd)\n\n # create temp tile\n temp = obj[u : u + h, l : l + w]\n if zl != 0:\n temp = temp.coarsen(\n {x_dim: 2**diff, y_dim: 2**diff}, boundary=\"pad\"\n ).mean()\n temp.raster.set_nodata(nodata)\n\n if driver == \"netcdf4\":\n path = join(ssd, f\"{row}.nc\")\n temp = temp.raster.gdal_compliant()\n temp.to_netcdf(path, engine=\"netcdf4\", **kwargs)\n elif driver in gis_utils.GDAL_EXT_CODE_MAP:\n ext = gis_utils.GDAL_EXT_CODE_MAP.get(driver)\n path = join(ssd, f\"{row}.{ext}\")\n temp.raster.to_raster(path, driver=driver, **kwargs)\n else:\n raise ValueError(f\"Unkown file driver {driver}\")\n\n file.write(f\"{path}\\n\")\n\n del temp\n\n file.close()\n # Create a vrt using GDAL\n vrt_fn = join(root, f\"{mName}_zl{zl}.vrt\")\n gis_utils.create_vrt(vrt_fn, file_list_path=txt_path)\n prev = zl\n zls.update({zl: float(dst_res)})\n del obj\n\n # Write a quick data catalog yaml\n yml = {\n \"crs\": self.crs.to_epsg(),\n \"data_type\": \"RasterDataset\",\n \"driver\": \"raster\",\n \"path\": f\"{mName}_zl{{zoom_level}}.vrt\",\n \"zoom_levels\": zls,\n }\n with open(join(root, f\"{mName}.yml\"), \"w\") as f:\n yaml.dump({mName: yml}, f, default_flow_style=False, sort_keys=False)", "def scns2tilecache_all_avail(self):\n scn_lst = self.get_scnlist_tilecache()\n for scn in scn_lst:\n self.scn2tilecache(scn)", "def download_all_maps(self):\n return self._download_all_maps_recur()", "def get_data_without_transactions():\n _, res = DBX.files_download(c.io.FILE_DATA)\n\n dfs = {x: pd.read_excel(io.BytesIO(res.content), sheet_name=x) for x in c.dfs.ALL_FROM_DATA}\n\n return 
dfs", "def download_thumb():\n\n curItem = treeview.focus().strip('#')\n select_values = series_dict[curItem]\n print(curItem)\n with open(\"images_url_dict.json\", \"r\") as f53:\n imgs_dict = json.load(f53)\n\n name = \"-\".join(curItem.lower().split())\n img_list = imgs_dict[name]\n img_url = img_list[0]\n\n r = requests.get(img_url, stream=True, headers={'User-agent': 'Mozilla/5.0'})\n path = \"thumbnails\\\\{}.jpg\".format(name)\n if r.status_code == 200:\n with open(path, 'wb') as f3:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f3)\n print(\"Done downloading\")\n select_values = series_dict[curItem]\n editent2var.set(path)\n with open('series_table.json', 'w') as f5:\n json.dump(series_dict, f5, indent=2)", "def buildings(self) -> DataFrame:\n raw = self._get_buildings()\n if raw:\n return ensure_camel_columns(read_json(json.dumps(raw)).set_index(\"id\"))\n raise IOError(\"Empty response from web request.\")", "def fetchall_df(result_proxy):\n# result = result_proxy.fetchall(keep_col_names=T) ???\n result = [row for row in tqdm(result_proxy)]\n return pd.DataFrame(result, columns=result[0].keys())" ]
[ "0.5520219", "0.55059403", "0.5477932", "0.5469635", "0.5457995", "0.54150224", "0.5368179", "0.534763", "0.5336239", "0.53228396", "0.53197813", "0.5276907", "0.5260043", "0.52545166", "0.52139163", "0.521263", "0.5197093", "0.5197093", "0.5183362", "0.51803267", "0.5175006", "0.515176", "0.5147438", "0.5142199", "0.5139298", "0.51365477", "0.51348084", "0.51330566", "0.5122389", "0.5121921", "0.5112029", "0.510448", "0.5099313", "0.5098758", "0.50957614", "0.50944585", "0.50852", "0.5067772", "0.5043589", "0.5038753", "0.50373507", "0.50295275", "0.502027", "0.5019915", "0.50198036", "0.50175095", "0.5012135", "0.5009862", "0.500625", "0.49884033", "0.49869904", "0.4986638", "0.49853975", "0.49809375", "0.49786305", "0.49761122", "0.49756324", "0.49706942", "0.4941175", "0.49365598", "0.4933421", "0.4929965", "0.4929232", "0.4925181", "0.49241492", "0.4917568", "0.49016708", "0.4898923", "0.4895825", "0.48909605", "0.48901135", "0.4889076", "0.48781607", "0.48775032", "0.48688096", "0.48651543", "0.48642305", "0.485812", "0.48558992", "0.48508453", "0.48418483", "0.4828086", "0.4827885", "0.48263034", "0.48211828", "0.4813984", "0.48037434", "0.47936434", "0.47905633", "0.4788465", "0.47862405", "0.4777557", "0.47759128", "0.4773559", "0.47668335", "0.47626537", "0.4762578", "0.47588718", "0.47578952", "0.47544414", "0.47490293" ]
0.0
-1
Infinite sequence of integers.
def integers():
    i = 1
    while True:
        yield i
        i = i + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def infinite_increment():\n i = 0\n while 1:\n yield i\n i += 1", "def int_to_seq(i):\n\ts = []\n\tprime = xprimes()\n\twhile i != 1:\n\t\ts.append(0)\n\t\tp = next(prime)\n\t\twhile i % p == 0:\n\t\t\ts[-1] += 1\n\t\t\ti /= p\n\treturn s", "def infinite_odd_generator():\n current = 1\n while True:\n yield current\n current = current + 2", "def simple_range(limit):\n i = 0\n while i < limit:\n yield i\n i += 1", "def xrange1(value):\n try:\n i = int(value)\n return [x+1 for x in xrange(i)]\n except:\n return []", "def seq_ints(n, start=0, step=1):\n return list(range(start, start + n*abs(step), step))", "def int_to_inv_seq(i, n):\n\tinv_seq = []\n\twhile n > 1:\n\t\tfct = factorial(n - 1)\n\t\tinv_seq.append(i / fct)\n\t\ti %= fct\n\t\tn -= 1\n\tinv_seq.append(0)\n\treturn inv_seq", "def xrange0(value):\n try:\n i = int(value)\n return list(xrange(i))\n except:\n return []", "def id_generator():\n start_value = 0\n while True:\n yield start_value\n start_value += 1", "def local_seq():\n return st.integers(min_value=1)", "def numeric_sequence_iteration(self) -> global___Statement.Iteration.NumericSequenceIteration:", "def task_10_generator_of_simple_numbers() -> Generator[int, None, None]:\n def is_num_simple(n):\n \"\"\"\n Return: True if n is a simple number or False if it is not\n \"\"\"\n for i in range(n, 1, -1):\n if n % i == 0 and i < n and n != 1:\n return False\n return True\n\n # generator part\n n = 2\n while n < 200:\n if is_num_simple(n):\n yield n\n n = n + 1", "def generate_sequence(n):\n\n sequence = []\n\n # generate sequence\n while n != 1:\n sequence.append(n)\n n = next_integer(n)\n\n # append 1 to sequence since all sequences assumed to end in 1\n sequence.append(1)\n\n return sequence", "def gen_num(lim=10000):\n n = 1\n yield 2\n yield 3\n while 6 * n + 1 <= lim:\n yield 6 * n - 1\n yield 6 * n + 1\n n += 1", "def e_seq():\n yield 2;\n for n in count(2, 2):\n yield 1\n yield n\n yield 1", "def numbers():\n for number in range(1, 76):\n yield number", "def fibonacci():\n yield 0\n element = yield 1\n previous = element\n while element < 1e100:\n current = yield element\n element = previous + current\n if current > 1:\n previous = current\n\n return element", "def simple_seq(seq):\n for i in seq:\n yield i", "def iseq(start=0, stop=None, inc=1):\n if stop is None: # allow isequence(3) to be 0, 1, 2, 3\n # take 1st arg as stop, start as 0, and inc=1\n stop = start; start = 0; inc = 1\n return range(start, stop+inc, inc)", "def to_int(a):\n i = 0\n while a:\n i += 1\n a = a.next\n return i", "def natural_numbers():\n \n acc=0\n for x in range(1000): \n if x%3==0 or x%5==0:\n acc=acc+x\n return acc", "def fib(limit):\n a, b = 0, 1\n while a <= limit:\n yield a\n a, b = b, a + b", "def range() -> List[int]:\n pass", "def section_4_7():\n import itertools\n\n def test1():\n def count(n):\n while True:\n yield n\n n += 1\n\n c = count(0)\n for x in itertools.islice(c, 10, 20):\n print(x)\n\n test1()", "def get_integers(bitwidth: int, unsigned: bool, limit: int = 0) -> Generator:\n if unsigned:\n start, stop = 0, ((1 << bitwidth) - 1)\n else:\n start, stop = (-(1 << bitwidth - 1)), (1 << (bitwidth - 1) - 1)\n\n for num in _fuzzdb_integers(limit):\n if num >= start and num <= stop:\n yield num", "def renumber():\n\n counter = itertools.count(1)\n while True:\n yield 's%s'%counter.next()", "def fibonacci() -> Iterator[int]:\n a, b = 0, 1\n while True:\n yield a\n a, b = b, a + b", "def just(n, 
seq):\n it = iter(seq)\n for _ in range(n - 1):\n yield next(it, None)\n yield tuple(it)", "def fibonacci():\n\ta, b = 0, 1\n\tyield 0\n\twhile True:\n\t\ta, b = b, a + b\n\t\tyield a", "def fibonacciSequence(number:int) -> List[int]:\n return [fibonacci(num) for num in range(number + 1)]", "def lucas_lehmer() -> Generator[int, None, None]:\n seed = 4\n while True:\n yield seed\n seed = seed**2 - 2", "def id_generator():\n\t\tcount = 0\n\t\twhile True:\n\t\t\tyield count\n\t\t\tcount += 1", "def ag(n):\r\n s = h()\r\n while n >0:\r\n next(s)\r\n n -= 1\r\n return next(s)", "def fibonacci_sequence(max):\n term = fibonacci_term(0)\n f = []\n i = 1\n while term < max:\n f.append(term)\n term = fibonacci_term(i)\n i += 1\n return f", "def next ( num = 1 ) :\n return run ( num )", "async def a_enumerate(seq, start=0):\n i = start\n async for val in seq:\n yield i, val\n i += 1", "def numbers(num):\n r = []\n for i in range(num):\n d = len(r)\n r = [1 if i == 0 or i == d else r[i-1]+r[i] for i in range(d+1)]\n yield r", "def countdown():\n for i in range(100, 0, -1):\n yield i", "def _mc_gen():\r\n n = 1\r\n while True:\r\n yield n\r\n n += 1", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def main():\n # [] -> 0\n print length_asc_seq([])\n\n # [1] -> 1\n print length_asc_seq([1])\n\n # [1, 1] -> 2\n print length_asc_seq([1, 1])\n\n # [1, 3, 5, 7] -> 4\n print length_asc_seq([1, 3, 5, 7])\n\n # [-1, -5, 12, -9, 13] -> 3\n print length_asc_seq([-1, -5, 12, -9, 13])", "def I (self, n):", "def _ids(self):\n prev_values = set()\n while True:\n next_value = self._time_ns()\n while True:\n if next_value not in prev_values:\n break\n next_value += 1000\n prev_values.add(next_value)\n yield next_value", "def __next__(self):\n self.n += 2\n if self.n > self.container.maximum:\n raise StopIteration\n return self.n", "def __next__(self):\n self.n += 2\n if self.n > self.container.maximum:\n raise StopIteration\n return self.n", "def generator(factor: int, test: typing.Callable[[int], bool],\n start: int) -> typing.Iterator[int]:\n value = start\n while True:\n value = (value * factor) % 2147483647\n if test(value):\n yield value", "def nextInt(self) -> int:\n raise NotImplementedError", "def zeros(n):\n return [0 for i in range(n)]", "def fib(number: int) -> int:\n return next(islice(generator(number), number, number + 1))", "def gen_fib_neg():\n a, b = 0, 1\n yield a\n yield b\n while True:\n a, b = b, a - b\n yield b", "def fibonacci(n):\n a = 0\n b = 1\n counter = 0\n while True:\n if (counter > n): return\n yield a\n a = b\n b = a + b\n counter += 1", "def forever(iterable):\n it = iter(iterable)\n while True:\n try:\n yield next(it)\n except Exception as e:\n print(e)\n it = iter(iterable)", "def _fuzzdb_integers(limit: int = 0) -> Generator:\n path = _get_fuzzdb_path() / Path(\"integer-overflow/integer-overflows.txt\")\n with open(path, \"rb\") as stream:\n for line in _limit_helper(stream, limit):\n yield int(line.decode(\"utf-8\"), 0)", "async def aenumerate(asequence, start=0):\n n = start\n async for elem in asequence:\n yield n, elem\n n += 1", "def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(next(seq))\n except StopIteration:\n pass\n return result", "def iter_zeros(self):\n num = quotient = 0\n while num < self._len:\n chunk = self.data[quotient]\n if chunk & self.zero_mask:\n remainder = 0\n while remainder < self.width and num < self._len:\n item = (chunk >> remainder) & 3\n if item == PC_ZERO:\n yield num\n remainder += 
2\n num += 1\n else:\n num += (self.width >> 1)\n quotient += 1", "def generate_digits(maximum): \n\tcounter = 1\n\tdigitcount = 1\n\twhile digitcount <= maximum:\n\t\tfor digit in list(str(counter)):\n\t\t\tyield int(digit)\n\t\t\t\n\t\tcounter += 1\n\t\tdigitcount += len(list(str(counter)))", "def range_loop(max_val, step=1):\n return range(1, max_val+step, step)", "def fib():\n x, y = 0, 1\n while True:\n yield x\n x, y = y, x + y", "def forever():\n\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing)\n return animate", "def generate_int(self, num):\n int_num = []\n for _ in range(num):\n int_num.append(self.fake.pyint(min_value=0, max_value=10000000000000000))\n return int_num", "def next(self, n: int | None = None) -> Any:\n self._fillcache(n)\n if not n:\n if self._cache[0] == self.sentinel:\n raise StopIteration\n if n is None:\n result = self._cache.popleft()\n else:\n result = []\n else:\n if self._cache[n - 1] == self.sentinel:\n raise StopIteration\n result = [self._cache.popleft() for i in range(n)]\n return result", "def gen_Numbers(f, L):\r\n t = 1\r\n while True:\r\n for k in L:\r\n t += k\r\n yield t\r\n # endless if no edge given\r\n if (f <= 0):\r\n continue\r\n # but let it end if running sum runs over given edge\r\n if (t >= f):\r\n return None", "def odd_generator(limit):\n current = 1\n while current < limit:\n yield current\n current = current + 2", "def longincseq(v):\n n=len(v)\n if n==0: return -1\n l = 0\n u = n-1\n max2here=1\n maxsofar=1\n for i in xrange(l+1, u+1):\n if v[i]>v[i-1]: \n max2here+=1\n else:\n max2here=1\n maxsofar = max(maxsofar, max2here)\n return maxsofar", "def t_fibonnaci():\n a = 1\n b = 1\n c = a + b\n while True:\n yield c\n a = b + c\n b = c + a \n c = a + b", "def fibonacci(a, b, limit=None):\n loop_indefinitely = limit is None\n while loop_indefinitely or b < limit:\n yield b\n a, b = b, a + b", "def triangle_numbers():\n counter, tri_number = 1, 1\n while True:\n yield tri_number\n counter += 1\n tri_number += counter", "def fibonacci(n: int):\n a, b, counter = 0, 1, 0\n while True:\n if (counter > n):\n return\n yield a\n a, b = b, a + b\n counter += 1", "def get_fibonacci_list(limit):\n if limit == 1:\n return [1]\n\n n1 = 0\n n2 = 1\n nth = 1\n\n result = []\n\n while nth < limit:\n result.append(nth)\n\n nth = n1 + n2\n n1 = n2\n n2 = nth\n\n # Remove duplicate 1's at beginning\n result.pop(0)\n\n return result", "def generator(self):\n return [None, 1]", "def fibonacci(num):\n counter = 0\n\n # Start fibonacci\n sequence = [0, 1]\n while len(sequence) < num:\n n1 = sequence[counter]\n n2 = sequence[counter + 1]\n sequence.append(n1+n2)\n\n counter += 1\n\n return sequence", "def digit_generator(N=1_000_000):\n i = 0\n number = 1\n while N > i:\n for _i in str(number):\n yield _i\n i += 1\n number += 1", "def enumerate_(start = 0):\n\n @filters\n def _dagpype_internal_fn_act(target):\n count = start \n try:\n while True:\n e = (yield)\n target.send((numpy.arange(count, count + len(e)), e))\n count += len(e)\n except GeneratorExit: \n target.close() \n\n return _dagpype_internal_fn_act", "def __iter__(self):\n return iter(range(1, self.size() + 1))", "def xrange(*args):\n len_args = len(args)\n if len_args == 1:\n stop = int(args[0])\n start = 0\n step = 1\n elif len_args == 2:\n start = int(args[0])\n stop = int(args[1])\n step = 1\n elif len_args == 3:\n start = int(args[0])\n stop = int(args[1])\n step = int(args[2])\n else:\n raise TypeError(\"xrange() requires 1-3 int arguments\")\n if step < 0:\n bcmp = 
operator.gt\n elif step > 0:\n bcmp = operator.lt\n else:\n raise StopIteration\n act = int(start)\n while bcmp(act, stop):\n yield act\n act += step", "def prompt_for_numbers():\n\n numbers = []\n print(\"Enter a series of numbers, with -1 to quit\")\n\n num = 0\n\n while num != -1:\n num = int(input())\n\n if num != -1:\n numbers.append(num)\n\n return numbers", "def lucas():\n yield 2\n a = 2\n b = 1\n while True:\n yield b\n a, b = b, a + b", "def seq(n,x=0, y=1):\r\n if n==1:\r\n return x\r\n elif n==2:\r\n return y\r\n else:\r\n return seq(n-1,x,y)+seq(n-2,x,y)", "def test_generator_manual() -> None:\n reversed_int: List[int] = []\n\n generator = reverse([1, 2, 3])\n reversed_int.append(next(generator))\n reversed_int.append(next(generator))\n reversed_int.append(next(generator))\n\n with pytest.raises(StopIteration):\n next(generator)\n\n assert reversed_int == [3, 2, 1]", "def grange(start=1, step=1, stop=None):\n \n if stop is None:\n x = int(start)\n dif = int(step)\n while True:\n yield x\n x += dif\n else:\n for x in range(start, stop, step):\n yield x", "def prime_numbers(upto):\n sieve = BitArray(upto + 1, 1)\n for number in xrange(2, upto + 1):\n if not sieve[number]:\n continue\n yield number\n for multiple in xrange(number ** 2, upto + 1, number):\n sieve[multiple] = 0\n return", "def gen_primes():\n\n n = 1\n while True:\n while not isPrime(n):\n n += 1\n\n yield n\n n += 1", "def init_naive_array(n):\n result = list()\n for i in range(1, n+1):\n result.append(i)\n return result", "def fibonacci_inner_generator() -> Iterator[int]:\n yield 0\n yield 1\n fib1 = fibonacci_inner_generator()\n next(iter(fib1))\n yield from (f2 + f1 for f2, f1 in zip(fibonacci_inner_generator(), fib1))", "def _next_interval(self):\n return self.interval_generator()", "def count(seq):\n\treturn sum(1 for x in seq)", "def init_list(no_elements):\n\ti = 0\n\tnumbers\t= []\n\twhile i < no_elements:\n\t\tnumbers.append(i)\n\n\t\ti += 1\n\n\t# return initialized array\n\treturn numbers", "def fibonacci(n):\n a, b, counter = 0, 1, 0\n while True:\n if counter > n:\n return\n yield a\n a, b = b, a + b\n counter += 1", "def main():\n next_val_string = '1'\n\n for counter in range(0, 31):\n print(\"{}:\\t[{}]\".format(counter, len(next_val_string)))\n next_val_list = get_next_in_seq(next_val_string)\n next_val_string = count_array_to_string(next_val_list)\n\n # and so it ends with 5808", "def next(self, initial):", "def factorial_loop(n):\n\n pass # @todo -fix this", "def FastaM10Iterator(handle, seq_count=...):\n ...", "def test_withCountIntervalZero(self):\n clock = task.Clock()\n accumulator = []\n\n def foo(cnt):\n accumulator.append(cnt)\n if len(accumulator) > 4:\n loop.stop()\n\n loop = task.LoopingCall.withCount(foo)\n loop.clock = clock\n deferred = loop.start(0, now=False)\n\n clock.advance(0)\n self.successResultOf(deferred)\n\n self.assertEqual([1, 1, 1, 1, 1], accumulator)", "def rng() -> int:", "def fibonacci(n):\n a, b = 1, 1\n count = 0\n while count < n:\n yield a\n count += 1\n a, b = b, a+b", "def fibonacci_generator():\n fib_prev = 0 # prev fib number\n fib_cur = 1 # next fib number\n i = 1 # number position\n while True:\n yield i, fib_cur\n i += 1\n fib_prev, fib_cur = fib_cur, fib_prev + fib_cur", "def fib(n):\n l = [0,1]\n current = 1\n while current < n:\n l.append(current)\n current = l[-1] + l[-2]\n return l" ]
[ "0.7470997", "0.7343358", "0.6424951", "0.6344632", "0.63421345", "0.62673986", "0.60785764", "0.60572314", "0.60339946", "0.6026884", "0.5996939", "0.59359825", "0.5839094", "0.58256906", "0.58099836", "0.5782609", "0.57588905", "0.5748371", "0.5706836", "0.5637029", "0.5626966", "0.56118757", "0.5589854", "0.5553567", "0.55522346", "0.555092", "0.5545826", "0.5543459", "0.5538988", "0.5504037", "0.54715925", "0.546385", "0.5452543", "0.54503036", "0.5429608", "0.54257965", "0.542097", "0.53943294", "0.5386449", "0.5382983", "0.53819054", "0.5374939", "0.53644973", "0.5362006", "0.53474116", "0.53474116", "0.5339634", "0.53383076", "0.5338177", "0.5329375", "0.5324595", "0.53170586", "0.5310257", "0.5302129", "0.5300661", "0.52986604", "0.5282866", "0.52797806", "0.5263558", "0.5263331", "0.5259105", "0.5257316", "0.5255336", "0.5251875", "0.52517813", "0.52516526", "0.52501005", "0.5229019", "0.52262104", "0.52157503", "0.5203599", "0.52026194", "0.5198464", "0.5192007", "0.51848775", "0.5184152", "0.51828796", "0.5180286", "0.5179796", "0.51728755", "0.51678795", "0.51605445", "0.5157797", "0.515432", "0.51501185", "0.5148527", "0.5146505", "0.514611", "0.5143306", "0.5141635", "0.5141613", "0.513972", "0.51304054", "0.5128516", "0.5125438", "0.5119819", "0.51191926", "0.51176256", "0.5103821" ]
0.7472032
1
Returns first n values from the given sequence.
def take(n, seq):
    seq = iter(seq)
    result = []
    try:
        for i in range(n):
            result.append(next(seq))
    except StopIteration:
        pass
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def take(n, seq):\n return itertools.islice(seq, n)", "def nth(n, seq):\n try:\n return seq[n]\n except TypeError:\n return next(itertools.islice(seq, n, None))", "def take(iterable, n):\n return list(itertools.islice(iterable, n))", "def lookahead(n, iterable):\n for value in islice(copy.copy(iterable), n, None):\n return value\n raise IndexError(n)", "def take(n, iterable):\n return list(itertools.islice(iterable, n))", "def take(n, iterable):\n return list(itertools.islice(iterable, n))", "def just(n, seq):\n it = iter(seq)\n for _ in range(n - 1):\n yield next(it, None)\n yield tuple(it)", "def take(n, iterable):\n return list(islice(iterable, n))", "def take(n, iterable):\n return list(islice(iterable, n))", "def first_n(n):\r\n return Quantifier(\"first_{}\".format(n),\r\n isom=False, cons=True, lcons=False, rmon=True, lmon=None,\r\n fn=lambda seq: first_n_ver(seq, n),\r\n gen_fn=lambda verify_fn, truth_value, max_length: first_n_gen(n, verify_fn, truth_value, max_length))", "def look_ahead(self, n: int = 1):\n return self.data[self.end:self.end+n]", "def firstn(reader, n):\n\n # TODO(yuyang18): Check if just drop the reader, could clean the opened\n # resource or not?\n\n def firstn_reader():\n for i, item in enumerate(reader()):\n if i == n:\n break\n yield item\n\n return firstn_reader", "def first_n_ver(seq, n):\r\n # TODO: more complicated presupposition handling instead of just false?\r\n if len(seq) < n:\r\n return Quantifier.F\r\n\r\n num_AB = 0\r\n for item in seq:\r\n if num_AB >= n:\r\n return Quantifier.T\r\n # if an A-not-B found before n ABs are, return F\r\n if np.array_equal(item, Quantifier.AnotB) and num_AB < n:\r\n return Quantifier.F\r\n elif np.array_equal(item, Quantifier.AB):\r\n num_AB += 1\r\n\r\n if num_AB >= n:\r\n return Quantifier.T\r\n\r\n # there are less than n ABs in total\r\n return Quantifier.F", "def take_nth(n):\n def _take_nth_xducer(step):\n outer = {\"idx\": 0}\n def _take_nth_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if outer[\"idx\"] % n:\n outer[\"idx\"] += 1\n return r\n else:\n outer[\"idx\"] += 1\n return step(r, x)\n return _take_nth_step\n return _take_nth_xducer", "def iter_n(sequence: Sequence[T], n: int) -> List[T]:\n\t\n\tfor i in range(len(sequence) - (n-1)):\n\t\tyield sequence[i:i+n]", "def next_n(iterator, N):\n try:\n items = []\n for _ in range(N):\n items.append(next(iterator))\n return items\n except StopIteration:\n if items:\n return items\n return None", "def take(n, iterable, islice=islice):\n return islice(iterable, n)", "def top_n(values, first_n=10):\n values = iter(values)\n top = [val for val in islice(values, first_n)]\n if len(top) < first_n:\n return top\n heapq.heapify(top)\n for val in values:\n heapq.heappushpop(top, val)\n return top", "def drop(n, seq):\n return itertools.islice(seq, n, None)", "def take(iterable, n):\n\n def taking(iterable_):\n for i, e in enumerate(iterable_):\n if i < n:\n yield e\n\n return taking(iterable)", "def nth(n, iterable, default = None):\n return next(islice(iterable, n, None), default)", "def split_by_n(seq, n):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def genslices(n):\n def range_with_none():\n yield None\n yield from range(-n, n+1)\n\n for t in product(range_with_none(), range_with_none(), range_with_none()):\n s = slice(*t)\n if s.step != 0:\n yield s", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def nth(iterable, n, default=None):\n return 
next(islice(iterable, n, None), default)", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(np.ceil(i * length / num))]", "def window(seq, n):\n seq_it = iter(seq)\n result = tuple(it.islice(seq_it, n))\n if len(result) == n:\n yield result \n for elem in seq_it:\n result = result[1:] + (elem,)\n yield result", "def split_by_n( seq, n ):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def take(self, n): # noqa: N805\n return List(_islice(self, n))", "def nth(iterable, n, next=next, islice=islice, default=None):\n return next(islice(iterable, n, None), default)", "def takeNGenerator(seq, n):\n\tindex = 0\n\twhile index + n <= len(seq):\n\t\tyield seq[index:index + n]\n\t\tindex = index + 1", "def head(iterables, n=1):\n for index, item in enumerate(iterables):\n if index >= n:\n break\n yield item", "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(ceil(i * length / num))]", "def peek(self, n: int) -> Tuple:\n\n data = tuple(itertools.islice(self._iterator, n))\n self._iterator = itertools.chain(data, self._iterator)\n return data", "def get_n(self, n):\n \n return [self.get_values() for _ in range(n)]", "def sliding_window(seq, n=DEFAULT_WINDOW_WIDTH):\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result \n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def window(seq, n=2):\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def window(seq, n=2):\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def next_n(self, n, fast_forward=False):\n return list(it.islice(self.gen, n))", "def take(num, iterable):\n return list(islice(iterable, num))", "def first_n_features(data, n=5000):\n for i, feature in enumerate(gffutils.iterators.DataIterator(data)):\n if i > n:\n break\n yield feature", "def splitevery(s, n):\n\treturn [s[x:x+n] for x in range(0,len(s), n)]", "def peek(self, n: int | None = None) -> Any:\n self._fillcache(n)\n if n is None:\n result = self._cache[0]\n else:\n result = [self._cache[i] for i in range(n)]\n return result", "def nth(iterable, index):\n return next(itertools.islice(iterable, index, None))", "def take(self, n: int = -1) -> List[T]:\n if n == -1:\n n = self._per_page\n\n if not isinstance(n, int) or n < 0:\n raise ArgumentError(\n f\"argument n={n} is invalid; n must be an int and n >= 1\"\n )\n\n it = iter(self)\n return list(itertools.islice(it, n))", "def __getitem__(self, n):\n if isinstance(n, slice):\n start, stop, step = n.start, n.stop, n.step\n if not start:\n start = 0\n if not stop:\n stop = len(self)\n if stop < 0:\n stop = len(self) + stop\n if start < 0:\n start = len(self) + start\n return self._fa.get_seq(self.name, start + 1, stop)[::step]\n\n elif isinstance(n, int):\n if n < 0:\n n = len(self) + n\n return self._fa.get_seq(self.name, n + 1, n + 1)", "def take_first(count):\n def _take_first(iterable):\n return islice(iterable, count)\n return pipe | set_name('take_first(%s)' % count, _take_first)", "def iwindow(seq, n):\n it = iter(seq)\n result = tuple(islice(it, n))\n\n if len(result) == n:\n yield result\n\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def 
first(seq):\n return next(iter(seq))", "def nth_element(iterable, n, first=0, last=None, key=None):\n assert hasattr(iterable, '__getitem__')\n last = last or len(iterable)\n pivot_idx = n\n pivot_idx = partition_with_pivot(iterable, pivot_idx, first=first, last=last, key=key)\n if n == pivot_idx:\n return\n elif n < pivot_idx:\n return nth_element(iterable, n, first, pivot_idx, key=key)\n else:\n return nth_element(iterable, n, pivot_idx+1, last, key=key)", "def pull_reads(self, n, prng): \n if not self.sampling:\n self.convert_to_array()\n index = prng.randint(0, self.total, size=n)\n index = np.sort(index)\n return self.reads[index,:]", "def select(individuals, n):\r\n # return selBest(individuals, n)\r\n return individuals[:n]", "def compute_sequence(starting_numbers: List[int], n_elems: int) -> int:\n elems = list(reversed(starting_numbers))\n\n for _ in range(len(starting_numbers), n_elems):\n try:\n idx_before = elems.index(elems[0], 1)\n except ValueError:\n elems = [0] + elems\n else:\n elems = [idx_before] + elems\n\n return elems[0]", "def next(self, n: int | None = None) -> Any:\n self._fillcache(n)\n if not n:\n if self._cache[0] == self.sentinel:\n raise StopIteration\n if n is None:\n result = self._cache.popleft()\n else:\n result = []\n else:\n if self._cache[n - 1] == self.sentinel:\n raise StopIteration\n result = [self._cache.popleft() for i in range(n)]\n return result", "def window_trey1(sequence, n):\n # really nice use of 'shortest' zip behaviour\n sequences = [sequence[i:] for i in range(n)]\n return zip(*sequences)", "def iterate(iterator, n):\n # throw away n-1 elements\n for index in range(1, n):\n next(iterator, None)\n\n return next(iterator, None)", "def _get_slice(series, start, length):\n return [ int(s) for s in series[start:start+length] ]", "def eat(seq, n=None):\n if n is None:\n collections.deque(seq, maxlen=0)\n else:\n next(itertools.islice(seq, n, n), None)", "def seq_ints(n, start=0, step=1):\n return list(range(start, start + n*abs(step), step))", "def chunks(seq: Sequence[T], n: int) -> Iterator[Sequence[T]]:\n for i in range(0, len(seq), n):\n yield seq[i:i + n]", "def generate_sequence(self, n=100, initial_state=None):\n\n if initial_state is None:\n if self.pad:\n sequence = [START_OF_SEQ] * self.order\n else:\n sequence = list(random.choice(self.records.keys()))\n else:\n sequence = initial_state[:]\n\n for i in range(n):\n current_state = tuple(sequence[-self.order:])\n next_token = self.sample(current_state)\n sequence.append(next_token)\n\n if next_token == END_OF_SEQ:\n return sequence\n\n return sequence", "def drop(iterable, n, islice=islice):\n return islice(iterable, n, None)", "def chunk_it(seq, n):\n\n avg = len(seq) / float(n)\n out = []\n last = 0.0\n while last < len(seq):\n out.append(seq[int(last):int(last + avg)])\n last += avg\n return out", "def last_n_ver(seq, n):\r\n return first_n_ver(list(reversed(seq)), n)", "def take(n):\n def _take_xducer(step):\n outer_vars = {\"counter\": n}\n def _take_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n n = outer_vars[\"counter\"]\n outer_vars[\"counter\"] -= 1\n r = step(r, x) if n > 0 else r\n return ensure_reduced(r) if outer_vars[\"counter\"] <= 0 else r\n return _take_step\n return _take_xducer", "def head(self, n=6):\n if self._cached_index is None:\n return self._load_index(n)\n else:\n return self._index().head(n)", "def get_every_nth_state(n=10):\n new_states = []\n for i, state in enumerate(states, 1):\n if i % n == 0:\n 
new_states.append(state)\n return new_states", "def skip(self, n):\n return self.__class__(itertools.islice(self, n, None))", "def peek_list(self, n):\n return self._buffer[self.pos:self.pos+n]", "def next_n(self, n: int, fast_forward=False):\n data = []\n while len(data) < n:\n try:\n record = self.queue.get(True, self.wait)\n data.append(record)\n except Empty:\n raise StopIteration\n return data", "def _select_n(arr, n):\n selection = []\n\n idx = range(0, len(arr))\n for x in range(n):\n if len(idx) == 0:\n break\n i = randint(0, len(idx) - 1)\n selection.append(arr[idx[i]])\n del idx[i]\n\n return selection", "def peek(self, n_ahead=1):\n return self.state((self.idx + n_ahead) % len(self))", "def imputer(seq, n=500):\n cur = len(seq)\n if cur < n:\n return np.concatenate((seq, np.zeros(n - cur)))\n return seq[: n]", "def get_spread(array, n):\n end = len(array) - 1 \n spread = np.ceil(np.linspace(0, end, n)).astype(int)\n indices = np.unique(spread)\n try:\n return array[indices]\n except KeyError:\n return array.iloc[indices]", "def first_rows(self, n: int) -> \"SampleDataSet\":\n return SampleDataSet(self._data.iloc[:n].copy())", "def nth(_list, n):\n n = lloc(_list, n)\n return [a[n] for a in _list]", "def _get_sequence(value, n, channel_index, name):\n # Performance is fast-pathed for common cases:\n # `None`, `list`, `tuple` and `int`.\n if value is None:\n return [1] * (n + 2)\n\n # Always convert `value` to a `list`.\n if isinstance(value, list):\n pass\n elif isinstance(value, tuple):\n value = list(value)\n elif isinstance(value, int):\n value = [value]\n elif not isinstance(value, collections_abc.Sized):\n value = [value]\n else:\n value = list(value) # Try casting to a list.\n\n len_value = len(value)\n\n # Fully specified, including batch and channel dims.\n if len_value == n + 2:\n return value\n\n # Apply value to spatial dims only.\n if len_value == 1:\n value = value * n # Broadcast to spatial dimensions.\n elif len_value != n:\n raise ValueError(f\"{name} should be of length 1, {n} or {n + 2}. 
\"\n f\"Received: {name}={value} of length {len_value}\")\n\n # Add batch and channel dims (always 1).\n if channel_index == 1:\n return [1, 1] + value\n else:\n return [1] + value + [1]", "def limit(iterator, n=None):\n # Producing at most `n` values from the given iterator.\n # Tried using try-except blocks to cover the stop iteration\n # exception for iter.\n if not n:\n return iterator\n\n output = []\n iter = (i for i in iterator)\n for x in range(0, n):\n\n try:\n output.append(next(iter))\n\n except StopIteration:\n pass\n\n return output", "def makespread(sequence, num):\n length = float(len(sequence))\n seq = np.array(sequence)\n return seq[np.ceil(np.arange(num) * length / num).astype(int)]", "def lookahead(self, n=1):\n while len(self._remaining) < n:\n self._remaining.append(next(self))\n return [self._remaining[i] for i in range(n)]", "def groups_of(seq, n):\n for i in range(0, len(seq), n):\n yield seq[i : (i + n)]", "def vec_shift_left_n(x, n):\n return jnp.zeros_like(x).at[0:-n].set(x[n:])", "def _nsmallest(\n self,\n n=5,\n columns: Optional[List[str]] = None,\n keep: Literal[\"last\", \"first\"] = \"first\",\n ):\n return self.sort(by=columns, ascending=True).head(n)", "def limit(iterator, n=None):\n for i, v in enumerate(iterator):\n yield v\n if i + 1 == n:\n break", "def at_least_n_ver(seq, n):\r\n num_AB = 0\r\n for item in seq:\r\n if np.array_equal(item, Quantifier.AB):\r\n if num_AB == n-1:\r\n return Quantifier.T\r\n else:\r\n num_AB += 1\r\n return Quantifier.F", "def lowestCurrent(requestContext, seriesList, n):\n\n return sorted( seriesList, key=safeLast )[:n]", "def get_number(number_seqeunce):\n number_sequence.append(None)\n return [i for i in number_seqeunce if number_seqeunce.count(i) % 2 != 0][0]", "def burnin_by_first_n(samples, N, step_number=False, logger_level=\"debug\"):\n _samples = copy.deepcopy(samples)\n n_samples = {key: N for key in _samples.keys()}\n if step_number:\n n_samples = {\n key: item + N if item is not None else N for key, item in\n _number_of_negative_steps(_samples, logger_level=logger_level).items()\n }\n getattr(logger, logger_level)(\n \"Removing the first {} as burnin\".format(\n \", \".join(\n [\"{} samples from {}\".format(val, key) for key, val in n_samples.items()]\n )\n )\n )\n return _samples.discard_samples(n_samples)", "def split_into_samples(seq, n_steps_past, n_steps_future):\n \n X_Series, Y_Series = list(), list()\n\n for step in range(0,len(seq)): \n \n val_past = step + n_steps_past\n val_fwd = val_past + n_steps_future\n \n if val_fwd > len(seq):\n break\n \n # Get past values\n X_Series.append(seq.values[step:val_past])\n # Get forward values\n Y_Series.append(seq.values[val_past:val_fwd])\n\n return np.array(X_Series), np.array(Y_Series)", "def limit(requestContext, seriesList, n):\n return seriesList[0:n]", "def iterslices(iterable, n, pad_last=False, pad_value=None):\n current = []\n for a in iterable:\n current.append(a)\n if len(current) == n:\n yield current\n current = []\n if current:\n if pad_last:\n current += [pad_value] * (n-len(current))\n yield current", "def get_every_nth_state(n=10):\n loops = int(50/n)\n state_list = []\n for i in range(loops):\n state_list.append(states[(i+1)*n-1])\n return state_list", "def getitem(s, i):\n while i > 0:\n s, i = rest(s), i - 1\n return first(s)", "def genslices(n):\n return product(range(-n, n + 1), range(-n, n + 1), range(-n, n + 1))", "def kmers(self, n: int, step: int = 1) -> Generator:\n return (\n Seq(self.sequence[i : i + n]) for i in range(0, 
len(self.sequence), step)\n )", "def genslices(n):\n return product(range(-n, n+1), range(-n, n+1), range(-n, n+1))", "def rest(self, n=1):\n return _(self._[n:])", "def chunks(seq, n):\n assert len(seq) > n\n avg = len(seq) / float(n)\n out = []\n last = 0\n while round(last) < len(seq):\n out.append(seq[round(last):round(last + avg)])\n last += avg\n return out", "def get_table_nfirst_lines(self, table, n=1):\n sql = \"SELECT * FROM %s ;\" % table\n cur = self._connection.cursor()\n cur.execute(sql)\n if n <= 1:\n res = [cur.fetchone()]\n else:\n res = []\n for line in cur:\n n -= 1\n if n <= -1:\n break\n res.append(line)\n cur.close()\n return res" ]
[ "0.7429517", "0.6876003", "0.6727532", "0.67118037", "0.66985476", "0.66985476", "0.66961426", "0.6665714", "0.6665714", "0.66652995", "0.6647308", "0.64990807", "0.6490624", "0.6476307", "0.6448948", "0.6416278", "0.6414325", "0.64115244", "0.6388718", "0.63765794", "0.6330259", "0.63142514", "0.6313415", "0.6304847", "0.6304847", "0.6304847", "0.63014895", "0.62594324", "0.6253964", "0.6246031", "0.6239908", "0.62289876", "0.6219591", "0.6147841", "0.6069936", "0.6058204", "0.60553735", "0.6054018", "0.6054018", "0.60340697", "0.5997329", "0.5972133", "0.5965218", "0.5961913", "0.5947355", "0.59453964", "0.5908225", "0.5906719", "0.58678347", "0.58647346", "0.58564585", "0.58543134", "0.5835839", "0.5812396", "0.5774217", "0.5762141", "0.57594097", "0.57460517", "0.57154816", "0.5691104", "0.5689176", "0.5662946", "0.56612015", "0.56569666", "0.56238914", "0.5614048", "0.5613208", "0.55777293", "0.5574445", "0.5563698", "0.55634326", "0.5559268", "0.552879", "0.5523672", "0.55224586", "0.5484543", "0.54827785", "0.5482291", "0.5480719", "0.54679984", "0.54634964", "0.5462094", "0.5459591", "0.5458175", "0.54444796", "0.5438983", "0.5419534", "0.541874", "0.5414765", "0.54117256", "0.54112446", "0.54076946", "0.539579", "0.5395411", "0.53925717", "0.53869784", "0.5381496", "0.53733295", "0.5363373", "0.5351289" ]
0.7366886
1
Report Method to Get Work Order Details.
def get_work_order_detail(self, date_range):
    work_order_obj = self.env["task.line"]
    start = datetime.strptime(date_range.get("date_from"), "%Y-%m-%d")
    end = datetime.strptime(date_range.get("date_to"), "%Y-%m-%d")
    step = timedelta(days=1)
    workorder_detail = []
    while start <= end:
        sdate = str(
            datetime.strptime(
                str(start.date()) + " 00:00:00", DEFAULT_SERVER_DATETIME_FORMAT
            )
        )
        edate = str(
            datetime.strptime(
                str(start.date()) + " 23:59:59", DEFAULT_SERVER_DATETIME_FORMAT
            )
        )
        work_order_ids = work_order_obj.search(
            [("date_issued", ">=", sdate), ("date_issued", "<=", edate)]
        )
        if work_order_ids:
            parts_data = {}
            parts_value = []
            for parts_line in work_order_ids:
                if (
                    parts_line.fleet_service_id
                    and parts_line.fleet_service_id.state == "done"
                ):
                    parts_dict = {
                        "wo_name": parts_line.fleet_service_id
                        and parts_line.fleet_service_id.name
                        or "",
                        "vehicle_id": parts_line.fleet_service_id
                        and parts_line.fleet_service_id.vehicle_id
                        and parts_line.fleet_service_id.vehicle_id.name
                        or "",
                        "part_no": parts_line.product_id
                        and parts_line.product_id.default_code
                        or "",
                        "part_name": parts_line.product_id
                        and parts_line.product_id.name
                        or "",
                        "vehicle_make": parts_line.vehicle_make_id
                        and parts_line.vehicle_make_id.name
                        or "",
                        "qty": parts_line.qty or 0.0,
                        "uom": parts_line.product_uom
                        and parts_line.product_uom.name
                        or "",
                        "old_part_return": parts_line.old_part_return and "Yes" or "No",
                        "issued_by": parts_line.issued_by
                        and parts_line.issued_by.name
                        or "",
                        "remarks": parts_line.fleet_service_id
                        and parts_line.fleet_service_id.note
                        or "",
                    }
                    parts_value.append(parts_dict)
            if parts_value:
                parts_value = sorted(parts_value, key=lambda k: k["wo_name"])
                parts_data = {"date": start.date(), "value": parts_value}
                workorder_detail.append(parts_data)
        start += step
    return workorder_detail
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def order_report():", "def work_order_receipt_retrieve(self, work_order_id, id=None):\n pass", "def get_order_detail(orderid): \n data = order_obj.get_order_detail(orderid)\n return data", "def open_workorders(self, cr, uid, ids, context=None):\n context = context or {}\n models_data = self.pool.get('ir.model.data')\n data = self.browse(cr, uid, ids[0])\n wo_ids = self._make_query_result(cr, uid, data, context=context)\n\n # Get workorder views\n dummy, form_view = models_data.get_object_reference(cr, uid, 'l10n_in_mrp_subcontract', 'mrp_production_workcenter_form_cost_report')\n dummy, tree_view = models_data.get_object_reference(cr, uid, 'l10n_in_mrp_subcontract', 'mrp_production_workcenter_tree_view_cost_report')\n\n context.update({'group_by':'production_id'})\n\n return {\n 'domain': \"[('id','in',[\"+','.join(map(str, wo_ids))+\"])]\",\n 'name': _('WorkOrder Cost Analysis'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'context':context,\n 'res_model': 'mrp.production.workcenter.line',\n 'views': [(tree_view or False, 'tree'), (form_view or False, 'form')],\n 'type': 'ir.actions.act_window',\n }", "def getOrderInfo(self):\n return self.__orderinfo", "def order_item_details(self) -> 'outputs.OrderItemDetailsResponse':\n return pulumi.get(self, \"order_item_details\")", "def get_wo_mthly_smry(self, workorder_browse):\n wo_summary_data = []\n wo_check_dict = {}\n no = 0\n if workorder_browse:\n for work_rec in workorder_browse:\n if work_rec.state and work_rec.state == \"done\":\n no += 1\n identification = \"\"\n repair_line_data = \"\"\n if work_rec.vehicle_id:\n identification += work_rec.vehicle_id.name\n if work_rec.vehicle_id.f_brand_id:\n identification += \" \" + work_rec.vehicle_id.f_brand_id.name\n if work_rec.vehicle_id.model_id:\n identification += \" \" + work_rec.vehicle_id.model_id.name\n for repaire_line in work_rec.repair_line_ids:\n if repaire_line.complete is True:\n if (\n repaire_line.repair_type_id\n and repaire_line.repair_type_id.name\n ):\n repair_line_data += (\n repaire_line.repair_type_id.name + \", \"\n )\n if work_rec.parts_ids:\n for parts_line in work_rec.parts_ids:\n if work_rec.id in wo_check_dict.keys():\n parts_data = {\n \"no\": -1,\n \"location\": \"\",\n \"type\": \"\",\n \"wo\": \"\",\n \"identification\": \"\",\n \"vin\": \"\",\n \"plate_no\": \"\",\n \"work_performed\": \"\",\n \"part\": parts_line.product_id\n and parts_line.product_id.default_code\n or \"\",\n \"qty\": parts_line.qty or 0.0,\n \"uom\": parts_line.product_uom\n and parts_line.product_uom.name\n or \"\",\n }\n wo_summary_data.append(parts_data)\n else:\n wo_check_dict[work_rec.id] = work_rec.id\n parts_data = {\n \"no\": no,\n \"location\": work_rec.team_id\n and work_rec.team_id.name\n or \"\",\n \"type\": work_rec.main_type or \"\",\n \"wo\": work_rec.name or \"\",\n \"identification\": identification or \"\",\n \"vin\": work_rec.vehicle_id\n and work_rec.vehicle_id.vin_sn\n or \"\",\n \"plate_no\": work_rec.vehicle_id\n and work_rec.vehicle_id.license_plate\n or \"\",\n \"work_performed\": repair_line_data\n and repair_line_data[:-2]\n or \"\",\n \"part\": parts_line.product_id\n and parts_line.product_id.default_code\n or \"\",\n \"qty\": parts_line.qty or 0.0,\n \"uom\": parts_line.product_uom\n and parts_line.product_uom.name\n or \"\",\n }\n wo_summary_data.append(parts_data)\n else:\n parts_data = {\n \"no\": no,\n \"location\": work_rec.team_id\n and work_rec.team_id.name\n or \"\",\n \"type\": work_rec.main_type or \"\",\n \"wo\": work_rec.name or \"\",\n 
\"identification\": identification or \"\",\n \"vin\": work_rec.vehicle_id\n and work_rec.vehicle_id.vin_sn\n or \"\",\n \"plate_no\": work_rec.vehicle_id\n and work_rec.vehicle_id.license_plate\n or \"\",\n \"work_performed\": repair_line_data\n and repair_line_data[:-2]\n or \"\",\n \"vehicle_make\": \"\",\n \"qty\": \"\",\n \"uom\": \"\",\n }\n wo_summary_data.append(parts_data)\n if not wo_summary_data:\n msg = _(\n \"Warning! \\n\\\n No data Available for selected work order.\"\n )\n raise UserError(msg)\n return wo_summary_data", "def get_details(self):", "def get_order_details(game_id: int, user_id: int, start_time: float = None, end_time: float = None):\n start_time, end_time = get_time_defaults(game_id, start_time, end_time)\n query = \"\"\"\n SELECT\n o.id as order_id,\n relevant_orders.status,\n relevant_orders.order_status_id,\n symbol,\n relevant_orders.timestamp,\n buy_or_sell,\n quantity,\n order_type,\n time_in_force,\n price,\n relevant_orders.clear_price\n FROM orders o\n INNER JOIN (\n SELECT os_full.id,\n os_full.timestamp,\n os_full.order_id,\n os_full.clear_price,\n os_full.status,\n os_relevant.order_status_id\n FROM order_status os_full\n INNER JOIN (\n SELECT os.order_id, grouped_os.max_id as order_status_id\n FROM order_status os\n INNER JOIN\n (SELECT order_id, max(id) as max_id\n FROM order_status\n GROUP BY order_id) grouped_os\n ON\n os.id = grouped_os.max_id\n WHERE os.status NOT IN ('cancelled', 'expired')\n ) os_relevant\n ON os_relevant.order_id = os_full.order_id\n ) relevant_orders\n ON relevant_orders.order_id = o.id\n WHERE game_id = %s AND user_id = %s AND relevant_orders.timestamp >= %s AND relevant_orders.timestamp <= %s;\"\"\"\n\n with engine.connect() as conn:\n df = pd.read_sql(query, conn, params=[game_id, user_id, start_time, end_time])\n\n df = pivot_order_details(df)\n df[\"status\"] = \"fulfilled\"\n df.loc[df[\"timestamp_fulfilled\"].isna(), \"status\"] = \"pending\"\n return df", "def __str__(self):\n return f'Order: {self.size} {self.drink_name} from {self.shop}\\n' \\\n f'Details: {self.details}\\n' \\\n f'Location: {self.location}\\n' \\\n f'Contact Info: {self.customer_name}, {self.customer_number}'", "def test_get_order_address(self):\n pass", "def test_get_order(self):\n pass", "def work_order_receipt_retrieve(self, work_order_id, id=None):\n if work_order_id is None or not is_hex(work_order_id):\n logging.error(\"Work order id is empty or Invalid\")\n return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER,\n \"Worker id is empty or Invalid\")\n\n json_rpc_request = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"WorkOrderReceiptRetrieve\",\n \"id\": id,\n \"params\": {\n \"workOrderId\": work_order_id\n }\n }\n response = self.__uri_client._postmsg(json.dumps(json_rpc_request))\n return response", "def get_order(self, order_id):\n request = OrdersGetRequest(order_id)\n # 3. Call PayPal to get the transaction\n response = self.client.execute(request)\n # 4. Save the transaction in your database. 
Implement logic to save transaction to your database for future reference.\n print('Status Code: ', response.status_code)\n print('Status: ', response.result.status)\n print('Order ID: ', response.result.id)\n print('Intent: ', response.result.intent)\n print('Links:')\n for link in response.result.links:\n print('\\t{}: {}\\tCall Type: {}'.format(\n link.rel, link.href, link.method))\n print('Gross Amount: {} {}'.format(response.result.purchase_units[0].amount.currency_code,\n response.result.purchase_units[0].amount.value))", "def ZeusOrderDetails(request):\n\n\tif request.method == \"GET\":\n\t\t\n\t\tform = ZeusOrderDetailsForm(request.GET)\n\n\t\tif form.is_valid():\n\t\t\t\n\t\t\ttry:\n\t\t\t\t# Get the Data of the Order being viewed\n\t\t\t\torder_data = Orders.objects.get(hash_key=form.cleaned_data.get(\"order\"))\n\n\t\t\t\torder_data.total = \"{:,.2f}\".format(float(order_data.subtotal) + float(order_data.shipping_cost))\n\t\t\t\torder_data.subtotal = \"{:,.2f}\".format(order_data.subtotal)\n\t\t\t\torder_data.shipping_cost = \"{:,.2f}\".format(order_data.shipping_cost)\n\n\t\t\t\t# Get the data needed for the cart product\n\t\t\t\tfor a in range(len(order_data.cart_data)):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tproduct = Products.objects.get(hash_key=order_data.cart_data[a][\"product_id\"])\n\t\t\t\t\t\torder_data.cart_data[a][\"image_0\"] = (product.image_0.url).replace(\"&export=download\", \"\") if product.image_0.url else None\n\t\t\t\t\t\torder_data.cart_data[a][\"price\"] = product.price\n\t\t\t\t\t\torder_data.cart_data[a][\"discount_per\"] = order_data.discount_per\n\t\t\t\t\t\torder_data.cart_data[a][\"d_price\"] = \"{:,.2f}\".format((product.price * (100 - order_data.discount_per[\"user_discount\"]) / 100) * (100 - order_data.discount_per[\"coupon_discount\"]) / 100 if order_data.discount_per else product.price * (100 - order_data.discount_per[\"coupon_discount\"]) / 100)\n\t\t\t\t\t\torder_data.cart_data[a][\"card_color\"] = product.card_color\n\t\t\t\t\texcept Products.DoesNotExist:\n\t\t\t\t\t\torder_data.cart_data[a][\"price\"] = \"N/A\"\n\n\t\t\t\thtml_content = {\n\t\t\t\t\t\"order_data\": order_data\n\t\t\t\t}\n\t\t\t\treturn render(request, \"lost-empire/site_templates/zeus/orders/order_details.html\", html_content)\n\t\t\texcept Orders.DoesNotExist:\n\t\t\t\tmessages.error(request, \"Order is not available in the Database.\")\n\t\t\t\treturn HttpResponseRedirect(reverse(\"ZeusOrders\"))\n\t\telse:\n\t\t\t# Handle errors if form is invalid\n\t\t\tform_error_catcher(request, form, [\"order\"])\n\t\t\treturn HttpResponseRedirect(reverse(\"ZeusOrders\"))\n\t\n\telif request.method == \"POST\":\n\n\t\t# Validate the inputs\n\t\tform = ZeusOrderDetailsForm(request.POST)\n\n\t\tif form.is_valid():\n\t\t\t\n\t\t\t# Check if the order is being completed\n\t\t\tif request.GET.get(\"p\") == \"order_completed\":\n\t\t\t\t\n\t\t\t\t# Shipping Company name is required even tho in forms.py is set to False\n\t\t\t\tif not form.cleaned_data.get(\"shippingcompany\"):\n\t\t\t\t\tmessages.warning(request, \"Shipping company is required. 
Please provide the name of the shipping company.\")\n\t\t\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")\n\t\t\t\telse:\n\n\t\t\t\t\t# Check if the order is still in the Database\n\t\t\t\t\ttry:\n\t\t\t\t\t\t# Get the Data of that order\n\t\t\t\t\t\torder_data = Orders.objects.get(hash_key=form.cleaned_data.get(\"order\"))\n\n\t\t\t\t\t\t# Set it to completed\n\t\t\t\t\t\torder_data.order_status = \"COMPLETED\"\n\n\t\t\t\t\t\t# Add the Shipping company name\n\t\t\t\t\t\torder_data.shipping_company = form.cleaned_data.get(\"shippingcompany\")\n\n\t\t\t\t\t\t# Check if the tracker code/id is available\n\t\t\t\t\t\tif form.cleaned_data.get(\"trackercode\"):\n\t\t\t\t\t\t\t# Add it to the orders data\n\t\t\t\t\t\t\torder_data.tracker_id = form.cleaned_data.get(\"trackercode\")\n\n\t\t\t\t\t\t# Commit to the Database (Save the changes to the Database)\n\t\t\t\t\t\torder_data.save()\n\n\t\t\t\t\t\tmessages.success(request, \"Order has been completed.\")\n\t\t\t\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")\n\t\t\t\t\texcept Orders.DoesNotExist:\n\t\t\t\t\t\tmessage.error(request, \"The order is no longer available in the Database. Most likely it has been removed\")\n\t\t\t\t\t\treturn HttpResponseRedirect(reverse(\"ZeusOrders\"))\n\t\t\t\n\t\t\t# Check if the order is being denied\n\t\t\telif request.GET.get(\"p\") == \"denied_order\":\n\t\t\t\t\n\t\t\t\t# Check if the order is still in the Database\n\t\t\t\ttry:\n\t\t\t\t\t# Get the Data of that order\n\t\t\t\t\torder_data = Orders.objects.get(hash_key=form.cleaned_data.get(\"order\"))\n\n\t\t\t\t\t# Set it to denied\n\t\t\t\t\torder_data.order_status = \"DENIED\"\n\n\t\t\t\t\t# Add the Shipping company name\n\t\t\t\t\tif form.cleaned_data.get(\"deniedmessage\"):\n\t\t\t\t\t\torder_data.denied_msg = form.cleaned_data.get(\"deniedmessage\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tmessages.error(request, \"A message of denial is required to successfully deny an order\")\n\t\t\t\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")\n\n\t\t\t\t\t# Check if refund is enabled\n\t\t\t\t\tif form.cleaned_data.get(\"refund_order_checkbox\"):\n\t\t\t\t\t\torder_data.refund_amount = order_data.paypal_data[\"purchase_units\"][0][\"payments\"][\"captures\"][0][\"amount\"][\"value\"]\n\t\t\t\t\t\trefund_status = RefundOrder(order_data.paypal_data[\"purchase_units\"][0][\"payments\"][\"captures\"][0][\"id\"], refund_amount=\"{:.2F}\".format(float(order_data.paypal_data[\"purchase_units\"][0][\"payments\"][\"captures\"][0][\"amount\"][\"value\"])), currency_code=order_data.paypal_data[\"purchase_units\"][0][\"payments\"][\"captures\"][0][\"amount\"][\"currency_code\"])\n\n\t\t\t\t\t\t# Check if the ReFund was successful\n\t\t\t\t\t\tif not refund_status:\n\t\t\t\t\t\t\tmessages.error(request, \"Refund failed. 
Please go to the Merchant's PayPal Account and check the status of refund for this order.\")\n\t\t\t\t\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")\n\t\t\t\t\telse:\n\t\t\t\t\t\torder_data.refund_amount = 0\n\t\t\t\t\t\t\n\t\t\t\t\t# Commit to the Database (Save the changes to the Database)\n\t\t\t\t\torder_data.save()\n\n\t\t\t\t\tmessages.success(request, \"Order has been denied.\")\n\t\t\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")\n\t\t\t\texcept Orders.DoesNotExist:\n\t\t\t\t\tmessage.error(request, \"The order is no longer available in the Database. Most likely it has been removed\")\n\t\t\t\t\treturn HttpResponseRedirect(reverse(\"ZeusOrders\"))\n\t\t\t\n\t\t\t# Else tell the user that the option p is missing\n\t\t\telse:\n\t\t\t\tmessages.error(request, \"Missing p option.\")\n\t\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")\n\t\telse:\n\t\t\t# Handle errors if form is invalid\n\t\t\tform_error_catcher(request, form, [\"shippingcompany\", \"trackercode\", \"deniedmessage\"])\n\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")", "def order_details(request, order_id, **kwargs):\n order = Order.objects.get(pk=order_id)\n if order.receiver != request.user and request.user.shipper_info.shipper_type != ShipperInfo.ShipperType.FRIENDSHIP_BIDDER:\n messages.error(request, 'You do not have permission to view this page.')\n return redirect('friendship:index')\n\n actions = OrderAction.objects.filter(order=order)\n\n # default currency to USD\n if \"currency\" not in request.session:\n request.session[\"currency\"] = Money.Currency.USD\n\n # calculate subtotal\n currency = request.session[\"currency\"]\n\n subtotal = 0\n min_bid = get_min_bid(order)\n \n if min_bid:\n if min_bid.retail_price:\n subtotal += min_bid.retail_price.get_value(currency)\n if min_bid.service_fee:\n subtotal += min_bid.service_fee.get_value(currency)\n\n data_dict = {}\n if len(order.url) > 50:\n order_url = order.url[0:47] + \"...\"\n else:\n order_url = order.url\n\n us_tracking = TrackingNumber.objects.filter(\n order=order\n ).filter(\n shipping_stage=TrackingNumber.ShippingStage.MERCHANT_TO_SHIPPER\n )\n thai_tracking = TrackingNumber.objects.filter(\n order=order\n ).filter(\n shipping_stage=TrackingNumber.ShippingStage.DOMESTIC_TO_RECEIVER\n )\n\n data_dict.update({\n 'us_tracking': us_tracking[0] if us_tracking else None,\n 'thai_tracking': thai_tracking[0] if thai_tracking else None\n })\n\n if min_bid:\n thb_total = math.ceil(min_bid.get_total(currency=Money.Currency.THB))\n else:\n thb_total = 0\n\n data_dict.update({\n 'order': order,\n 'order_url': order_url,\n 'actions': reversed(actions),\n 'latest_action': order.latest_action,\n 'min_bid': min_bid,\n 'subtotal': Money.format_value(subtotal, currency),\n 'usd': Money.Currency.USD,\n 'thb': Money.Currency.THB,\n 'usd_str': str(Money.Currency.USD).upper(),\n 'thb_str': str(Money.Currency.THB).upper(),\n 'thb_total': str(thb_total),\n 'currency': currency,\n 'manual_wire_transfer_form': ManualWireTransferForm(),\n })\n data_dict.update(kwargs)\n\n data_dict.update({ k : v.value\n for (k,v)\n in OrderAction.Action._member_map_.items()\n })\n\n new_val = math.ceil(thb_total - thb_total * settings.MANUAL_BANK_TRANSFER_DISCOUNT)\n\n # Manual bank transfer discount\n data_dict[\"manual_bank_transfer_total_str\"] = 
\"\\u0E3F{}\".format(\n new_val\n )\n data_dict[\"discount_str\"] = \"-\\u0E3F{}\".format(thb_total - new_val)\n\n # Braintree Setup\n if settings.DEBUG:\n env = \"sandbox\"\n else:\n env = \"production\"\n\n gateway = braintree.BraintreeGateway(access_token=settings.BRAINTREE_ACCESS_TOKEN)\n client_token = gateway.client_token.generate()\n client = \"{\" + \\\n f\"{env}: '{client_token}'\" + \\\n \"}\"\n data_dict[\"braintree_client\"] = client\n data_dict[\"payment_env\"] = env\n\n return render(request, 'friendship/order_details.html', data_dict)", "def get_order(self, walletId, orderId):\n return", "def get_order(self):\n #store the orders for the current cycle inside the class\n self.orders = self.firebase.get_data(\"orders\")", "def test_get_orders(self):\n pass", "def returnOrderTrades(self, order_number):", "def show_order_detail(self, order_id):\n\n data = cur.execute(\"\"\"SELECT productid, productname, quantity, location FROM orderitems WHERE orderid = ?\"\"\",\n (order_id,)).fetchall()\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Quantity\", \"Location\"]))", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def GetOrder(order_id): \n\t\"\"\"Method to get order\"\"\"\n\trequest = OrdersGetRequest(order_id)\n\tresponse = client.execute(request)\n\treturn response.result.__dict__[\"_dict\"]", "def __str__(self):\n return self.order_id", "def test_get_order_buyer_info(self):\n pass", "def trackOrderRequest(self):\n\t\tstart_dat=datetime.today()\n\t\tstart_date = start_dat - timedelta( hours=start_dat.time().hour,minutes=start_dat.time().minute,seconds=start_dat.time().second ) \n\t\tend_date=start_dat\n\t\tans=None\n\t\t#print start_dat.time().hour\n\t\tprint end_date\n\t\tans=Order.objects.filter(date_of_order__range=(start_date,end_date))\n\t\tlst=[]\n\t\tfor b in ans:\n\t\t\towneradd=b.owner_id.address\n\t\t\tuseradd=b.userid.address\n\t\t\tusername=b.userid.email\n\t\t\townername=b.owner_id.email\n\t\t\tuserphone=b.userid.contact_no\n\t\t\townerphone=b.owner_id.contact_no\n\t\t\tbookname=b.bookid.title\n\t\t\tstatus=b.paymentid.ispending\n\t\t\tbook=b.__dict__\n\t\t\tbook['owneradd']=owneradd\n\t\t\tbook['useradd']=useradd\n\t\t\tbook['username']=username\n\t\t\tbook['ownername']=ownername\n\t\t\tbook['userphone']=userphone\n\t\t\tbook['ownerphone']=ownerphone\n\t\t\tbook['name']=bookname\n\t\t\tif status==True:\n\t\t\t\tbook['status']=\"Pending\"\n\t\t\telse:\n\t\t\t\tbook['status']=\"Delivered\"\n\t\t\tlst.append(book)\n\t\t#print ans\n\t\t\n\t\treturn lst", "def work_order_receipt_create(self, work_order_id, worker_id,\n worker_service_id,\n requester_id,\n receipt_create_status,\n work_order_request_hash,\n id=None):\n pass", "def work_order_receipt_lookup(self, worker_service_id,\n worker_id,\n requester_id,\n receipt_status, id=None):\n pass", "def trade_details(self) -> MqexsTradeDetails:\n return self.__trade_details", "def issue_details(board, issue_number):\n issue = BoardIssue(board, issue_number)\n return issue.details()", "def get_details(self):\n raise Exception(\"bad details\")", "def shipmentDetails(request):\n order_id = request.GET.get('order_id')\n generate_request = oAuth_magento()\n\n payload = {\"searchCriteria[filter_groups][0][filters][0][field]\": \"increment_id\",\n \"searchCriteria[filter_groups][0][filters][0][value]\": order_id,\n \"searchCriteria[filter_groups][0][filters][0][conditionType]\": \"eq\",\n \"fields\": 
\"items[status,base_currency_code,grand_total,items[name,sku],extension_attributes[shipping_assignments[shipping[address[city,company,country_id,firstname,lastname,postcode,region,telephone]]]]]\",\n }\n response = requests.request(\"GET\", url=generate_request[0], headers=generate_request[1], params=payload)\n # with open('temp_files/magento_get_order_select.json','w') as f:\n # f.write(response.text)\n json_response = json.loads(response.text)\n context = {'result': json_response['items'][0]['extension_attributes']['shipping_assignments'][0]['shipping']['address'], \n 'status': json_response['items'][0]['status'],\n 'item_name': json_response['items'][0]['items'],\n 'price': json_response['items'][0]['base_currency_code'] + ' ' + str(json_response['items'][0]['grand_total']),\n }\n return JsonResponse(context)", "def compute_generate_order_details(request, order_basic, **kwargs):\n commodity_id_counts = {} # use dict store map between id and counts\n common_logger.info(type(request.session.get('commodity_list')))\n session_commodity_list = request.session.get('commodity_list', None)\n session_counts_list = request.session.get('counts_list', None)\n\n # 防止接口攻击\n if session_commodity_list is None or session_counts_list is None:\n raise Exception\n\n total_price = 0 # 订单总价\n try:\n # generate dict\n for pk, counts in zip(session_commodity_list, session_counts_list):\n commodity_id_counts[pk] = counts\n commodity = Commodity.commodity_.select_related('store', 'shopper').filter(\n pk__in=session_commodity_list) # one hit database\n for value in commodity:\n order_details = Order_details.order_details_.create(belong_shopper=value.shopper,\n commodity=value,\n order_basic=order_basic,\n price=value.discounts * value.price,\n commodity_counts=commodity_id_counts.get(value.pk),\n )\n total_price += value.price * value.discounts * commodity_id_counts.get(value.pk)\n total_price = PaymentSerializer.compute_total_price(total_price)\n except Exception as e:\n order_logger.error(e)\n return None, 0\n else:\n return total_price, sum(session_counts_list)", "def save_object(self, data):\n return OrderDetails(**data)", "def run(self):\n report_details = self.report_client.get(self.csv_report)\n print(\"Report Details - \", report_details)", "def report(self):\n log = self._array.state()\n result = []\n for record in log:\n result.append(f\"{record.worker_name()}\\t${record.task_payment()}\")\n return \"\\n\".join(result)", "def default_get(self, cr, uid, fields, context=None):\n\t\tif not context:context={}\n\t\tres = super(sale_order_delivery_wizard, self).default_get(cr, uid, fields, context=context)\n\n\t\tif 'active_id' in context:\n\t\t\torder_id = context.get('active_id', False)\n\t\t\tif order_id:\n\t\t\t\torder = self.pool.get('sale.order').browse(cr,uid,order_id,context)\n\t\t\t\tres['name']=order.name\n\t\t\t\tres['order_id']=order.id\n\t\t\t\tres['order_line']=[]\n\t\t\t\tres['delivery_date']=time.strftime('%Y-%m-%d')\n\t\t\t\tfor line in order.order_line:\n\t\t\t\t\tres['order_line'].append((0,0,{\n\t\t\t\t\t\t\"sequence_line\":line.sequence_line,\n\t\t\t\t\t\t\"name\":line.name or (line.product_id and line.product_id.name) or \"-\",\n\t\t\t\t\t\t\"line_id\":line and line.id,\n\t\t\t\t\t\t\"product_id\":line.product_id and line.product_id.id or False,\n\t\t\t\t\t\t\"product_qty\":line.product_uom_qty,\n\t\t\t\t\t\t\"product_uom\":line.product_uom and line.product_uom.id or False,\n\t\t\t\t\t\t\"product_uos_qty\":line.product_uos_qty,\n\t\t\t\t\t\t\"product_uos\":line.product_uos and 
line.product_uos.id or False,\n\t\t\t\t\t\t}))\n\t\treturn res", "def print_order(self):\r\n print(\"Order: Symbol=%s, Type=%s, Quantity=%s, Direction=%s\") % \\\r\n (self.symbol, self.order_type, self.quantity, self.direction)", "def order_update_print():\n result = order_obj.order_update_print(request.forms) \n return result", "def __init__(self, order_details: OrderDetails):\n self.details = order_details", "def payment_info_and_status(report):\n\n order_data = open(report)\n for line in order_data:\n order = lines_into_list(line) # function will split each line by '|' and get a list of strings\n # each order has 4 strings\n order[0:1] = []\n name = order.pop(0)\n order_as_floats = convert_to_floats(order)\n melon_count, paid = order_as_floats\n expected_cost = calculate_expected_cost(melon_cost, melon_count)\n print(f\"{name} paid ${paid:.2f}, expected ${expected_cost:.2f}\")\n check_order(name, expected_cost, paid)\n order_data.close()", "def test_get_pay_in_details(self):\n pass", "def print_report(self):\n assert len(self) == 1, 'This option should only be used for a single id at a time.'\n datas = {\n 'form': \n {\n 'company_id': self.company_id and [self.company_id.id] or [],\n 'warehouse_ids': [y.id for y in self.warehouse_ids],\n 'start_date': self.start_date,\n 'end_date': self.end_date,\n 'include_zero': self.include_zero,\n 'sort_order': self.sort_order,\n 'value': self.value,\n 'id': self.id,\n }\n }\n\n if [y.id for y in self.warehouse_ids] and (not self.company_id):\n self.warehouse_ids = []\n raise Warning(_('Please select company of those warehouses to get correct view.\\nYou should remove all warehouses first from selection field.'))\n return self.env.ref(\n 'most_selling_product.action_ir_most_selling_product'\n ).report_action(self, data=datas)", "def getworkunit(worker_id):\r\n\r\n worker_data = identify(worker_id)\r\n global time_start\r\n global started_working\r\n global work_status\r\n if work_status == Db.WorkStatusNames.has_work.value:\r\n\r\n saved_work_unit = Db.get_free_work_unit()\r\n if saved_work_unit is None:\r\n work_status = Db.WorkStatusNames.no_work.value\r\n else:\r\n if not started_working:\r\n print(\"Starting to work!\")\r\n time_start = time.time()\r\n started_working = True\r\n #It counts it's\r\n print(str(saved_work_unit[\"work_unit_id\"]) + \" \" + str(saved_work_unit))\r\n Db.assign_work_unit(saved_work_unit[\"work_unit_id\"], worker_id)\r\n return saved_work_unit\r\n\r\n\r\n\r\n return package_data({\"fail_message\": work_status})", "def data(self):\n return dict({\"order\": super(TakeProfitOrderRequest, self).data})", "def processOrders(self, printOutput=False):\n orderData = self.trader.tradeData.get('orders',None)\n if orderData.get('success') == 0: #order data contains failed api call\n logging.error('Success=0: orderData: %s' % orderData)\n orderData = self.trader.tapi.getOrders()\n if printOutput:\n try:\n for key in orderData.get('return').keys():\n order = orderData.get('return')[key]\n print('ID: %s %s %s %s at %s' %(key,\n order['pair'],\n order['type'],\n order['amount'],\n order['rate']))\n except TypeError as e:\n # TODO add debug flag for printing output to console on errors\n print'TypeError in processOrders:'\n print e\n logging.error('Type error in helper.processOrders: %s' % e)\n logging.info('orderData: %s' % orderData)\n except KeyError as e:\n print'KeyError in processOrders'\n print e\n logging.error('Key error in helper.processOrders: %s' % e)\n logging.info('orderData: %s' % orderData)\n return orderData", "def 
show_orders():\n return 'hehe'", "def __str__(self) -> str:\n return self.customer.name + ' completes checkout at ' + \\\n str(self.timestamp) + ' line ' + str(self.line_number)", "def retrieve(self, **kwargs):\n return self.client.execute(\"order\", \"GET\", kwargs)", "def test_get_specific_order(self):\n # Test with wrong parcel id\n # Correct format but not there\n response = self.client.get(\n 'api/v1/parcels/24034', headers=self.user_token_dict)\n data = json.loads(response.data)\n self.assertEqual(\n data, {'message': 'No Parcel delivery order with that id'})\n self.assertEqual(response.status_code, 400)\n # Test with wrong parcel id format\n response = self.client.get(\n 'api/v1/parcels/24034u', headers=self.user_token_dict) # Incorrect id format\n data = json.loads(response.data)\n self.assertEqual(data, {'message': 'Wrong id format'})\n self.assertEqual(response.status_code, 400)", "def get_bill_details(request):\n\n print request\n\n context = request['context']\n print context\n try:\n telephone_number = first_entity_value(request['entities'], 'phone_number')\n with open(os.path.join(sys.path[0], \"app/wit/static/users.json\"), \"r\") as data_file:\n data = json.load(data_file)\n customer_billing = data[telephone_number]['last_month_billing']\n print customer_billing\n\n customer_type = data[telephone_number]['type_customer']\n if customer_type == 'postpaid':\n\n reply = \"Our Initial Investigation shows that you're a \" + data[telephone_number]['type_customer'] + \" Customer and currently using \" + data[telephone_number]['plan_details'] + \" plan type.\"\n if customer_billing['roaming'] == 'True':\n reply += \"You had used your cellphone while on roaming for which you were charged extra.\"\n elif customer_billing['data_exhaust'] == 'True':\n reply += \"You had used your data network after your allocated limit was exhausted. You were charged for these services\"\n elif customer_billing['subscribed'] == 'True':\n reply += \"You had subscribed to some promotional services for which you were charged in extra.\"\n else:\n reply = \"Our Initial Investigation shows that you're a \" + data[telephone_number]['type_customer'] + \". We believe that this might be a mistake from our side and would like you to speak to our customer care executives separately.\"\n\n\n except:\n telephone_number = None\n reply = \"Your number is not subscribed with Airtel. 
Please contact your network operator for your query\"\n\n\n print reply\n\n context['bill_details'] = reply\n\n return context", "def test_case_customer_part_orders(self):\n pass", "def printOrders(self, event):\n \n pass", "def get_report(self):\n data = {\n 'ids': self.ids,\n 'model': self._name,\n 'form': {\n 'date_start': self.date_start,\n 'date_end': self.date_end,\n },\n }\n\n # use `module_name.report_id` as reference.\n # `report_action()` will call `_get_report_values()` and pass `data` automatically.\n return self.env.ref('base_enh.recap_report').report_action(self, data=data)", "def _generate_report(self):\n raise NotImplementedError", "def get_order_detail(ac_od_id):\n od = OrderDetail.objects.get(ac_od_id=ac_od_id)\n return od", "def test_create_confirm_order_details(self):\n pass", "def work_order_receipt_update_retrieve(self, work_order_id,\n updater_id,\n update_index, id=None):\n pass", "def __str__(self):\n return f\"Order Number: {self._order_number} \" \\\n f\"Product ID: {self._product_id} \" \\\n f\"Item: {self._item_type} \" \\\n f\"Name: {self._name} \" \\\n f\"Quantity: {self._quantity} \" \\\n f\"Product details: {self._product_details} \"", "def show_orders(self):\n\n data = cur.execute(\"\"\"SELECT * FROM orders\"\"\").fetchall()\n print(tabulate(data, headers=[\"Order ID\", \"Status\", \"Customer\", \"Address\", \"Delivery Method\"]))", "def print_report(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n partner_obj = self.pool.get('res.partner')\n partner_ids = partner_obj.search(cr, uid, [('customer', '=', True)], context=context)\n current_date = datetime.today().strftime('%Y-%m-%d')\n date = (datetime.today() - relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d')\n print_ids = []\n for partner in partner_obj.browse(cr, uid, partner_ids, context=context):\n for sale in partner.sale_order_ids:\n if date < sale.date_order and sale.date_order < current_date:\n print_ids.append(partner.id)\n \n list_ids = []\n list_ids = list(set(partner_ids)-set(print_ids))\n if not print_ids:\n raise osv.except_osv(_('Warring!'), _('There is no partner'))\n \n datas = {'ids': list_ids}\n res = self.read(cr, uid, ids, context=context)\n res = res and res[0] or {}\n res.update({'ids': datas['ids']})\n datas.update({'form': res})\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'inactive.partner.report',\n 'datas': datas,\n }", "def get_report(self, address: Any = None, rep_id: int = None) -> requests.Response:\n return self._call('GET', '/report', params={\n 'address': address,\n 'repId': rep_id\n })", "def get_report(self):\n raise NotImplementedError('Agent is an abstract base class')", "def get_order(self, order_id):\n request = OrdersGetRequest(order_id)\n #3. Call PayPal to get the transaction\n response = self.client.execute(request)\n return response\n #4. Save the transaction in your database. 
Implement logic to save transaction to your database for future reference.", "def on_order(self, order: OrderData):\n # print(\"on_order\")\n # print(order)\n pass", "def proposed_order_print(proposed_order_list):\n for item_details in proposed_order_list:\n proposed_order_item_print(item_details)", "def get_orders(self):\n return self.order_lst", "def get(self):\n\n bill = {\n 'product': {\n 'name': self.order.product.name,\n 'price': self.order.product.price\n },\n 'order_date_of_creation': self.order.date_of_creation,\n 'bill_date_of_creation': timezone.now(),\n 'discounts': [],\n 'total': self.order.product.price\n }\n\n return self.add_discount(bill)", "def test_get_transaction_details_request(self):\n self.trans_details.get_transaction_details(\n trans_id = 123456,\n )", "def payment_completed(order_id):\n order = Order.objects.get(id=order_id)\n\n #create invoice email\n subject = f'My Shop - EE Invoice no. {order.id}'\n message = 'Please, find attached the invoice for your recent purchase.'\n email = EmailMessage(subject, message, 'admin@myshop.com', [order.email])\n\n #generate PDF\n html = render_to_string('admin/orders/order/pdf.html', {'order': order})\n out =BytesIO()\n stylesheets = [weasyprint.CSS(settings.STATIC_ROOT + 'pdf.css')]\n weasyprint.HTML(string=html).write_pdf(out, stylesheets=stylesheets)\n\n #attach PDf file\n email.attach(f'order_{order.id}.pdf', out.getvalue(), 'application/pdf')\n\n #send email\n email.send()", "def show_receipt(context):\n return({'Store': settings.SITE_NAME,\n 'order': context['order']})", "def get(self):\n return sync.get_open_orders()", "def getOrderList(self):\r\n\t\treturn self.orders", "def get(self):\n orders = db.session.query(models.Order)\n args = order_query_parser.parse_args()\n order_id = args['order_id']\n if order_id is not None:\n orders = orders.filter_by(id=order_id)\n copy = args['copy_id']\n if copy is not None:\n orders = orders.filter_by(copy=copy)\n borrower = args['borrower']\n if borrower is not None:\n orders = orders.filter_by(borrower=borrower)\n\n copy_owner = args['copy_owner']\n if copy_owner is not None:\n orders = orders.filter_by(copy_owner=copy_owner)\n\n status = args['order_status']\n if status is not None:\n orders = orders.filter_by(status=status)\n date = args['return_date']\n if date is not None:\n orders = orders.filter_by(expire=date)\n if id is None and copy is None and borrower is None and copy_owner is None and status is None:\n return 'Please provide searching parameters', 400\n\n return [order.serialize() for order in orders], 200", "def get(self, order_id):\n order = db.session.query(models.Order).filter_by(id=order_id).first()\n if order is None:\n return 'Order does not exist', 404\n return order.serialize(), 200", "def getOrderList(self):\r\n\t\treturn self.pair.orders", "def get_issue(self, context):", "def get_report(self) -> str:\n return self.diagnostics.get_report()", "def print_quotation(self):\n self.filtered(lambda s: s.state == 'draft').write({'state': 'sent'})\n return self.env['report'].get_action(self, 'ferrua_report.sale_order')", "def __str__(self):\n\n return f'{self.order_id}'", "async def process(self, msg):\n logger.debug(\"msg:\", json.dumps(msg), caller=self)\n e = msg.get(\"e\")\n if e == \"executionReport\": # Order update.\n if msg[\"s\"] != self._raw_symbol:\n return\n order_no = \"{}_{}\".format(msg[\"i\"], msg[\"c\"])\n if msg[\"X\"] == \"NEW\":\n status = ORDER_STATUS_SUBMITTED\n elif msg[\"X\"] == \"PARTIALLY_FILLED\":\n status = ORDER_STATUS_PARTIAL_FILLED\n 
elif msg[\"X\"] == \"FILLED\":\n status = ORDER_STATUS_FILLED\n elif msg[\"X\"] == \"CANCELED\":\n status = ORDER_STATUS_CANCELED\n elif msg[\"X\"] == \"REJECTED\":\n status = ORDER_STATUS_FAILED\n elif msg[\"X\"] == \"EXPIRED\":\n status = ORDER_STATUS_FAILED\n else:\n logger.warn(\"unknown status:\", msg, caller=self)\n return\n order = self._orders.get(order_no)\n if not order:\n info = {\n \"platform\": self._platform,\n \"account\": self._account,\n \"strategy\": self._strategy,\n \"order_no\": order_no,\n \"action\": msg[\"S\"],\n \"order_type\": msg[\"o\"],\n \"symbol\": self._symbol,\n \"price\": msg[\"p\"],\n \"quantity\": msg[\"q\"],\n \"ctime\": msg[\"O\"]\n }\n order = Order(**info)\n self._orders[order_no] = order\n order.remain = float(msg[\"q\"]) - float(msg[\"z\"])\n order.status = status\n order.utime = msg[\"T\"]\n if self._order_update_callback:\n SingleTask.run(self._order_update_callback, copy.copy(order))", "def details(self):\n pass", "def _send_email_details(self, order):\n customer_email = order.email\n email_subject = render_to_string(\n 'checkout/confirmation_emails/confirmation_email_subject.txt',\n {'order': order})\n email_body = render_to_string(\n 'checkout/confirmation_emails/confirmation_email_body.txt',\n {'order': order, 'contact_email': settings.DEFAULT_FROM_EMAIL})\n\n send_mail(\n email_subject,\n email_body,\n settings.DEFAULT_FROM_EMAIL,\n [customer_email]\n )", "def get_shift_report_info(self, shift_id=0, shift_reg_id=0):\n try:\n personal_data = f'{\"-\" * 20}\\n'\n general_shift_info = []\n\n if shift_id == 0: # waiter\n shift_id = self.db_handler.get_shift_registration_by_shift_reg_id(shift_reg_id)[1]\n personal_data += self.get_shift_report_info_waiter(shift_reg_id)\n elif shift_reg_id == 0: # manager\n personal_data += self.get_shift_report_info_manager(shift_id)\n\n general_shift_info = self.get_shift_report_general_info(shift_id)\n\n msg = general_shift_info + personal_data\n\n return msg\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def get_specific_order(self,order_id):\n self.query = \"SELECT * FROM orders WHERE order_id=%s\"\n self.input = (order_id,) #tuple to support indexing\n self.query_1 = \"SELECT order_id FROM orders ORDER BY order_id DESC LIMIT 1.\"\n self.event = \"admin_get_specific_order\"\n self.error = \"Invalid order id\"\n self.message = \"Successfully fetched the order.\"\n self.order_id = order_id\n self.db_error = None", "def get(self):\n return DAO.orders", "def payment_completed(order_id):\n\n order = Order.objects.get(id=order_id)\n subject = f'My Shop - EE Invoice no. 
{order.id}'\n message = 'Please, find attached the invoice for your recent purchase.'\n email = EmailMessage(subject, message, 'admin@myshop.com', [order.email])\n\n pdf = render_to_pdf('admin/orders/order/pdf.html', {'order': order})\n email.attach(f'order_{order.id}.pdf', pdf.getvalue(), 'application/pdf')\n email.send()", "def read_work_order(order_id, file):\n ptn = re.compile(r'^===ORDER ?([0-9]*)===')\n lines = None\n on = False\n with open(file, 'r') as fin:\n for line in fin:\n m = ptn.match(line)\n if m:\n g = m.group(1)\n if g and int(g)==order_id:\n on = True\n lines = []\n else:\n on = False\n elif on:\n lines.append(line)\n if lines:\n return ''.join(lines)\n return None", "def woo_sale_report(self):\n version_info = odoo.service.common.exp_version()\n if version_info.get('server_version') == '14.0':\n action = self.env.ref('woo_commerce_ept.woo_action_order_report_all').read()[0]\n else:\n action = self.env.ref('woo_commerce_ept.woo_sale_report_action_dashboard').read()[0]\n\n return action", "def test_retrieve_iso20022_payment_instruction_status_report(self):\n pass", "def get_order_history(self):\n return self.__call__('orders', 'getorderhistory')", "def report(self) -> Any:", "def test_get_workout(self):\n response = self.client.open(\n '/workout/{id}'.format(id='id_example'),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get_order_detail_count(orderid): \n data = order_obj.get_order_detail(orderid,\"1\")\n return data", "def get_order_number(self):\n return self.__order_number", "def get_account_details(self):\n pass", "def test_get_order_items_buyer_info(self):\n pass", "def payment_completed(order_id):\n order = Order.objects.get(id=order_id)\n # create invoice e-mail\n subject = f'My Shop - EE Invoice no. {order.id}'\n message = 'Please, find attached the invoice for your recent purchase.'\n email = EmailMessage(subject,\n message,\n 'info@tishman.com.ng',\n [order.user.email])\n # generate PDF\n html = render_to_string('orders/order/pdf.html', {'order': order})\n out = BytesIO()\n stylesheets=[weasyprint.CSS(settings.STATIC_ROOT + 'css/pdf.css')]\n weasyprint.HTML(string=html).write_pdf(out,\n stylesheets=stylesheets)\n # attach PDF file\n email.attach(f'order_{order.id}.pdf',\n out.getvalue(),\n 'application/pdf')\n # send e-mail\n email.send()" ]
[ "0.68440074", "0.6475144", "0.6159926", "0.6083596", "0.608028", "0.59921384", "0.5808397", "0.58080214", "0.5693121", "0.5688188", "0.56651086", "0.5659879", "0.5623415", "0.56180996", "0.5615256", "0.5562105", "0.55497396", "0.55432594", "0.55332184", "0.5496018", "0.54649526", "0.54066366", "0.54066366", "0.54066366", "0.53959954", "0.53732836", "0.5371083", "0.53640014", "0.53638804", "0.5361521", "0.5351858", "0.5348487", "0.5347025", "0.53357595", "0.53283584", "0.5311822", "0.53112197", "0.53065675", "0.53034616", "0.53015757", "0.52982986", "0.5286007", "0.5276682", "0.52758074", "0.5274129", "0.52683485", "0.52558255", "0.52484155", "0.52409595", "0.5224564", "0.5219756", "0.5201094", "0.5189827", "0.51863116", "0.5182551", "0.518087", "0.51753354", "0.5175236", "0.5174116", "0.5167358", "0.5163035", "0.5144373", "0.51096684", "0.51065737", "0.5099021", "0.50939786", "0.5082924", "0.50582457", "0.5057135", "0.50554913", "0.50552046", "0.50432754", "0.5042307", "0.50422394", "0.5034814", "0.5030862", "0.5028774", "0.5023023", "0.5017749", "0.5013212", "0.50121546", "0.50089276", "0.50055355", "0.50012463", "0.5000468", "0.4999156", "0.49967855", "0.49896964", "0.49579194", "0.4954897", "0.49517614", "0.4950265", "0.4947359", "0.49469364", "0.49452332", "0.49448928", "0.4943415", "0.49371123", "0.49326938", "0.49312952" ]
0.70237696
0
Generate XLSX-format print report.
def generate_xlsx_report(self, workbook, data, parts_data): worksheet = workbook.add_worksheet("daily_parts_issuance_wizard") worksheet.set_column(0, 0, 10) worksheet.set_column(1, 1, 15) worksheet.set_column(2, 2, 20) worksheet.set_column(3, 3, 15) worksheet.set_column(4, 4, 10) worksheet.set_column(5, 5, 12) worksheet.set_column(6, 6, 10) worksheet.set_column(7, 7, 10) worksheet.set_column(8, 8, 15) worksheet.set_column(9, 9, 10) worksheet.set_column(10, 10, 15) worksheet.set_column(11, 11, 10) worksheet.set_column(12, 12, 20) worksheet.set_column(13, 13, 5) worksheet.set_column(14, 14, 5) worksheet.set_column(15, 15, 5) bold = workbook.add_format( {"bold": True, "font_name": "Arial", "font_size": "10"} ) tot = workbook.add_format( {"border": 2, "bold": True, "font_name": "Arial", "font_size": "10"} ) border = workbook.add_format( {"border": 2, "font_name": "Arial", "font_size": "10"} ) merge_format = workbook.add_format({"border": 2, "align": "center"}) format1 = workbook.add_format( {"border": 2, "bold": True, "font_name": "Arial", "font_size": "10"} ) format1.set_bg_color("gray") date = workbook.add_format({"num_format": "dd/mm/yy"}) worksheet.merge_range("C3:F3", "Merged Cells", merge_format) row = 0 row += 1 row += 1 worksheet.write(row, 2, "DAILY PARTS ISSUANCE", tot) row += 1 worksheet.write(row, 2, "Date From:", tot) worksheet.write(row, 3, data["form"]["date_from"] or "", border) worksheet.write(row, 4, "To:", tot) worksheet.write(row, 5, data["form"]["date_to"] or "", border) row += 2 worksheet.write(row, 0, "CMF", bold) row = 3 for objec in self.get_work_order_detail(data["form"]): row += 3 worksheet.write(row, 0, "DATE ISSUED :", bold) worksheet.write(row, 1, objec.get("date") or "", date) row += 2 worksheet.write(row, 0, "NO.", format1) worksheet.write(row, 1, "WO NO.", format1) worksheet.write(row, 2, "VEHICLE ID", format1) worksheet.write(row, 3, "PART NO.", format1) worksheet.write(row, 4, "PART NAME", format1) worksheet.write(row, 5, "VEHICLE MAKE", format1) worksheet.write(row, 6, "USED", format1) worksheet.write(row, 7, "UNIT TYPE", format1) worksheet.write(row, 8, "OLD PART RETURND", format1) worksheet.write(row, 9, "ISSUED BY", format1) worksheet.write(row, 10, "REMARKS", format1) line_row = row + 1 line_col = 0 counter = 1 for obj in objec.get("value"): worksheet.write(line_row, line_col, counter, border) line_col += 1 worksheet.write(line_row, line_col, obj.get("wo_name") or "", border) line_col += 1 worksheet.write(line_row, line_col, obj.get("vehicle_id") or "", border) line_col += 1 worksheet.write(line_row, line_col, obj.get("part_no") or "", border) line_col += 1 worksheet.write(line_row, line_col, obj.get("part_name") or "", border) line_col += 1 worksheet.write( line_row, line_col, obj.get("vehicle_make") or "", border ) line_col += 1 worksheet.write(line_row, line_col, obj.get("qty") or "", border) line_col += 1 worksheet.write(line_row, line_col, obj.get("uom") or "", border) line_col += 1 worksheet.write( line_row, line_col, obj.get("old_part_return") or "", border ) line_col += 1 worksheet.write(line_row, line_col, obj.get("issued_by") or "", border) line_col += 1 worksheet.write(line_row, line_col, obj.get("remarks") or "", border) line_col = 0 line_row += 1 counter += 1 worksheet.write(line_row, line_col, "********", border)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_xlsx(self):\n if self.date_from and self.date_to:\n if self.date_from > self.date_to:\n raise ValidationError(\"Date From must be less than Date To\")\n\n # active_record = self._context['id']\n # record = self.env['room.accommodation'].browse(active_record)\n data = {\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'guest_id': self.guest_id.id,\n 'model_id': self.id,\n 'check_out': self.check_out,\n 'date_today': fields.Datetime.now()\n }\n\n print(\"XLSX Wizard data : \", data)\n\n return {\n 'type': 'ir.actions.report',\n 'data': {\n 'model': 'accommodation.reporting',\n 'options': json.dumps(data, default=date_utils.json_default),\n 'output_format': 'xlsx',\n 'report_name': 'Accommodation Report'\n },\n 'report_type': 'xlsx'\n }", "def outputExcelReport(self):\n # ++++++++++\n # init\n # ++++++++++\n wb = openpyxl.Workbook()\n wb.fonts = openpyxl.styles.Font(\n name = 'Courier New',\n size = 12\n )\n # create and delete sheets\n _ = wb.create_sheet(title='Cover',index=0)\n _ = wb.create_sheet(title='Results',index=1)\n _ = wb.create_sheet(title='AllItems',index=2)\n _ = wb.remove(wb.worksheets[-1])\n # ++++++++++\n # Sheet 1 <Cover>\n # ++++++++++\n ws = wb['Cover']\n # --- title and date\n timeNow = datetime.datetime.now().isoformat().split('T')[0]\n ws.merge_cells('A1:B1')\n ws.merge_cells('A3:B3')\n ws['A1'] = '納入チェック ダイアグ確認結果'\n ws['A3'] = '作成日:{}'.format(timeNow)\n # --- sample info\n ws['A5'] = '<サンプル情報>'\n self._write2excel(ws, self._sample_info, 6, 1)\n for r in range(6,8):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- checker info\n ws['A9'] = '<チェッカ情報>'\n self._write2excel(ws, self._checker_info, 10, 1)\n for r in range(10,13):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- dmm info\n ws['A14'] = '<DMM情報>'\n self._write2excel(ws, self._dmm_info, 15, 1)\n for r in range(15,18):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- resistor info\n ws['A19'] = '<抵抗器情報>'\n self._write2excel(ws, self._resistor_info, 20, 1)\n for r in range(20,23):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n ws[cell.coordinate].font = STYLE_FONT_PASS\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 2 <Results>\n # ++++++++++\n ws = wb['Results']\n # --- output all scenario\n ws['A1'] = '<結果一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._result_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==6:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n ws.cell(cell.row,6).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = 
STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 3 <AllItems>\n # ++++++++++\n ws = wb['AllItems']\n # --- output all scenario\n ws['A1'] = '<出力一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._scenario_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==5:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # save book\n # ++++++++++\n wb.save(self._filename)", "def exporter():\n Session = modules.db_connect.connect()\n session = Session()\n report = xlsxwriter.Workbook('perception_report.xlsx')\n top_row_format = report.add_format({'bold': True})\n top_row_format.set_border(style=1)\n top_row_format.set_bg_color('#B8B8B8')\n\n \"\"\"Black row format at the top of each host detailed info\"\"\"\n black_row_format = report.add_format()\n black_row_format.set_border(style=1)\n black_row_format.set_bg_color('#000000')\n\n \"\"\"Detailed host row format\"\"\"\n host_row_format = report.add_format()\n host_row_format.set_border(style=1)\n host_row_format.set_bg_color('#CCCCCC')\n\n \"\"\"Format for text in row with host info\"\"\"\n host_row_wrapped_format = report.add_format()\n host_row_wrapped_format.set_border(style=1)\n host_row_wrapped_format.set_bg_color('#CCCCCC')\n host_row_wrapped_format.set_text_wrap('vjustify')\n\n \"\"\"Format description row in NSE output\"\"\"\n host_nse_output_top_format = report.add_format({'bold': True})\n host_nse_output_top_format.set_border(style=1)\n host_nse_output_top_format.set_bg_color('#B8B8B8')\n\n \"\"\"Format test row in NSE output\"\"\"\n host_nse_output_format = report.add_format()\n host_nse_output_format.set_border(style=1)\n host_nse_output_format.set_bg_color('#CCCCCC')\n\n \"\"\"Build the host_overview_worksheet\"\"\"\n host_overview_worksheet = report.add_worksheet()\n\n \"\"\"Build the host_detail_worksheet\"\"\"\n host_detail_worksheet = report.add_worksheet()\n\n \"\"\"Size up the overview worksheet\"\"\"\n 
host_overview_worksheet.set_column('B:B', 24)\n host_overview_worksheet.set_column('C:C', 15)\n host_overview_worksheet.set_column('D:D', 15)\n host_overview_worksheet.set_column('E:E', 15)\n host_overview_worksheet.set_column('F:F', 15)\n host_overview_worksheet.set_column('G:G', 20)\n host_overview_worksheet.set_column('H:H', 15)\n\n \"\"\"Size up the detail worksheet\"\"\"\n host_detail_worksheet.set_column('B:B', 38)\n host_detail_worksheet.set_column('C:C', 16)\n host_detail_worksheet.set_column('D:D', 16)\n host_detail_worksheet.set_column('E:E', 28)\n host_detail_worksheet.set_column('F:F', 15)\n host_detail_worksheet.set_column('H:G', 20)\n host_detail_worksheet.set_column('H:H', 25)\n host_detail_worksheet.set_column('I:I', 10)\n\n \"\"\"Description row for host overview\"\"\"\n host_overview_worksheet.write('B2', 'Hostname', top_row_format)\n host_overview_worksheet.write('C2', 'IP v4 Address', top_row_format)\n host_overview_worksheet.write('D2', 'IP v6 Address', top_row_format)\n host_overview_worksheet.write('E2', 'MAC Address', top_row_format)\n host_overview_worksheet.write('F2', 'MAC Vendor', top_row_format)\n host_overview_worksheet.write('G2', 'Operating System', top_row_format)\n host_overview_worksheet.write('H2', 'Host Type', top_row_format)\n\n \"\"\"Query the database for the hosts\"\"\"\n inventory_hosts = session.query(InventoryHost).all()\n\n \"\"\"Build overview worksheet\"\"\"\n overview_row = 2\n overview_col = 1\n for host in inventory_hosts:\n host_overview_worksheet.write(overview_row, overview_col, host.host_name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 1, host.ipv4_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 2, host.ipv6_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 3, host.macaddr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 4, host.mac_vendor.name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 5, host.product.name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 6, host.host_type, host_row_format)\n overview_row += 1\n\n \"\"\"Build detailed worksheet\"\"\"\n detail_row = 2\n detail_col = 1\n for host in inventory_hosts:\n\n \"\"\"Add the black row to start host detail info\"\"\"\n host_detail_worksheet.set_row(detail_row, 5)\n host_detail_worksheet.write(detail_row, detail_col, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, '', black_row_format)\n detail_row += 1\n\n \"\"\"Add row detail info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Hostname', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'IP v4 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'IP v6 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'MAC Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'MAC Vendor', top_row_format)\n 
host_detail_worksheet.write(detail_row, detail_col + 5, 'Host Type', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Operating System', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Version', top_row_format)\n detail_row += 1\n\n \"\"\"Add host info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host.host_name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, host.ipv4_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, host.ipv6_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, host.macaddr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, host.mac_vendor.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, host.host_type, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, host.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, host.product.version, host_row_format)\n detail_row += 2\n\n \"\"\"If there is no host nse script, just say so.\"\"\"\n if not host.host_nse_scripts:\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n else:\n\n \"\"\"Add the row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for host_scripts in host.host_nse_scripts:\n\n \"\"\"Count output the lines so we know what to merge\"\"\"\n lines = host_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n\n if not host.inventory_svcs:\n\n \"\"\"If there are no services for this host tell me\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n 
detail_row += 1\n\n host_detail_worksheet.write(detail_row, detail_col, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'no services', host_row_format)\n detail_row += 1\n\n else:\n for ports in host.inventory_svcs:\n\n \"\"\"Host services row info\"\"\"\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n \"\"\"Write the service info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, ports.protocol, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, ports.portid, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, ports.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, ports.svc_product, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, ports.extra_info, host_row_format)\n try:\n\n \"\"\"There may not be product info, but try.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, ports.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, ports.product.version, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, ports.product.product_update,\n host_row_format)\n detail_row += 1\n except AttributeError:\n\n \"\"\"Just write unknown if there is no product info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'unknown', host_row_format)\n detail_row += 1\n\n if not ports.svc_nse_scripts:\n\n \"\"\"If there is no NSE script info just say so.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n\n else:\n\n \"\"\"Service Script row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, 
detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for nse_scripts in ports.svc_nse_scripts:\n\n \"\"\"Count the lines in the output for merging\"\"\"\n lines = nse_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n nse_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines,\n detail_col + 7, nse_scripts.output,\n host_row_wrapped_format)\n detail_row += 1\n\n detail_row += 1\n report.close()\n session.close()", "def printreport():\n report = createreport()\n print(report[0])\n print(report[1])\n print(report[2])", "def generate_xls(self):\n self.wb = xlwt.Workbook()\n ws = self.wb.add_sheet('Sheet1')\n heading_style = xlwt.easyxf('font: bold true; alignment: horizontal center, wrap true;')\n extra_row = 0\n if self.date:\n date_style = xlwt.easyxf('font: bold true; alignment: horizontal left, wrap true;')\n ws.write_merge(0,0,0,self.table.no_of_columns()-1,'Date : '+self.date,date_style) \n extra_row = 1\n for i in range(len(self.headings)):\n ws.write_merge(i+extra_row,i+extra_row,0,self.table.no_of_columns()-1,self.headings[i],heading_style)\n ws.set_panes_frozen(True)\n ws.set_horz_split_pos(len(self.headings)+extra_row+1)\n ws.set_remove_splits(True)\n self.table.to_xls(ws,start_row=len(self.headings)+extra_row,start_col=0)\n return self.wb", "def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. 
There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True", "def print_report(stocks_to_print):\n\n print(\"=========== REPORT ============\")\n for stock in stocks_to_print:\n stock.print_one_line_report()", "def generate_service_odometer_xlsx_report(self, res, next_service):\n workbook = xlwt.Workbook()\n worksheet = workbook.add_sheet(\"next_service_by_odometer\")\n worksheet.col(0).width = 5000\n worksheet.col(1).width = 12500\n worksheet.col(2).width = 10000\n worksheet.col(3).width = 6000\n worksheet.col(4).width = 7500\n worksheet.col(5).width = 7500\n worksheet.col(6).width = 7500\n worksheet.col(7).width = 7500\n worksheet.col(8).width = 10000\n\n font = xlwt.Font()\n font.bold = True\n font.name = \"Arial\"\n font.height = 200\n # pattern = xlwt.Pattern()\n border = xlwt.easyxf(\"font: bold 1; font: name 1; font: height 200\")\n format1 = xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200;\\\n pattern: pattern solid, fore_colour yellow;\"\n )\n xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200\", num_format_str=\"DD/MM/YYYY\"\n )\n\n row = 0\n row += 1\n row += 1\n worksheet.write(row, 2, \"Scheduled Maintenance By Mileage\", format1)\n row += 3\n worksheet.write(row, 7, \"Date :\", format1)\n worksheet.write(row, 8, time.strftime(\"%d-%B-%Y\"), format1)\n row += 2\n worksheet.write(row, 0, \"NO.\", format1)\n worksheet.write(row, 1, \"VEHICLE ID\", format1)\n worksheet.write(row, 2, \"VIN NO.\", format1)\n worksheet.write(row, 3, \"MAKE\", format1)\n worksheet.write(row, 4, \"MODEL\", format1)\n worksheet.write(row, 5, \"LAST SERVICE DATE\", format1)\n worksheet.write(row, 6, \"LAST MILEAGE\", format1)\n worksheet.write(row, 7, \"NEXT 
MILEAGE\", format1)\n worksheet.write(row, 8, \"REGISTRATION STATE\", format1)\n line_row = row + 1\n line_col = 0\n counter = 1\n for obj in next_service:\n worksheet.write(line_row, line_col, counter, border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.name or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.vin_sn or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.f_brand_id and obj.f_brand_id.name or \"\", border\n )\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.model_id and obj.model_id.name or \"\", border\n )\n line_col += 1\n date = \"\"\n if obj.last_service_date:\n date = format_date(\n self.env,\n obj.last_service_date,\n self._context.get(\"lang\"),\n date_format=False,\n )\n worksheet.write(line_row, line_col, date or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.odometer or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.due_odometer or \"\", border)\n line_col += 1\n # worksheet.write(line_row, line_col,\n # obj.vechical_location_id and\n # obj.vechical_location_id.name or '', border)\n line_col = 0\n line_row += 1\n counter += 1\n worksheet.write(line_row, line_col, \"********\", border)\n fp = io.BytesIO()\n workbook.save(fp)\n fp.seek(0)\n data = fp.read()\n fp.close()\n res = base64.encodebytes(data)\n return res", "def _generate_report(self):\n raise NotImplementedError", "def generate_waiter_financial_report_excel_file(self, staff_info, period, month_report, path):\n try:\n workbook = xlw.Workbook(path)\n worksheet = workbook.add_worksheet()\n\n file_header_format = workbook.add_format({\n 'font_size':20,\n 'align': 'center',\n 'valign': 'vcenter'\n })\n table_header_format = workbook.add_format({\n 'bold': 1,\n 'border': 1,\n 'align': 'center',\n 'valign': 'vcenter',\n 'font_size': 12,\n 'fg_color': '#C0C0C0'})\n cell_format = workbook.add_format({\n 'font_size': 12,\n 'align':'center',\n 'valign':'vcenter'\n })\n sum_format = workbook.add_format({\n 'font_size': 12,\n 'align': 'center',\n 'valign': 'vcenter',\n 'fg_color': '#99FF99'\n })\n\n worksheet.set_column('A:A', 10)\n worksheet.set_column('B:B', 30)\n worksheet.set_column('C:C', 20)\n worksheet.set_column('D:D', 20)\n worksheet.set_column('E:E', 20)\n worksheet.set_column('F:F', 10)\n worksheet.set_column('G:G', 15)\n\n worksheet.merge_range('A1:G2', f'{staff_info[3]} {staff_info[1]} {period}', file_header_format)\n\n row = 4\n column = 0\n\n for line in month_report:\n for item in line:\n if row == 4:\n worksheet.write(row, column, item.__str__(), table_header_format)\n else:\n if month_report.index(line) == len(month_report)-1 and line.index(item) == len(line)-1:\n worksheet.write(row, column, item.__str__(), sum_format)\n else:\n worksheet.write(row, column, item.__str__(), cell_format)\n column += 1\n row += 1\n column = 0\n\n workbook.close()\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n 
self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'quarterly_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def to_xls(self,ws,start_row = 0,start_col = 0,width_ratio = 1): \n if self.col_width_dict: \n for c in range(self.no_of_columns()):\n ws.col(start_col+c).width = int(35*self.col_width(c)*width_ratio); \n \n boldstyle = xlwt.XFStyle()\n boldstyle.font.bold = True\n \n for r in range(self.no_of_rows()):\n for c in range(self.no_of_columns()):\n if r == 0:\n ws.write(start_row + r,start_col + c,self.cell(r,c),boldstyle)\n else:\n ws.write(start_row + r,start_col + c,self.cell(r,c))", "def generate_spreadsheet(request, id):\n election = get_object_or_404(Election, pk=id)\n response = render_to_response(\"django_elect/spreadsheet.html\", {\n 'full_stats': election.get_full_statistics(),\n })\n filename = \"election%s.xls\" % (election.pk)\n response['Content-Disposition'] = 'attachment; filename='+filename\n response['Content-Type'] = 'application/vnd.ms-excel; charset=utf-8'\n return response", "def pdf_report_generate(self, cnx, mysql=False, postgres=False):\n\n # Instantiating the controller helper class.\n aux = ControllerHelper()\n\n ret = aux._EXIT_SUCCESS\n\n # Instantiating the model class.\n model = ReporterModel()\n\n # Retrieving a list of all data items stored in the database.\n# (hdr_set, row_set) = model.get_all_data_items(cnx, mysql)\n\n # Retrieving a list of data items for a given date period.\n (hdr_set, row_set) = model.get_data_items_by_date(self.FROM, self.TO,\n cnx, mysql, postgres)\n\n # In case of getting an empty result set, informing the user.\n if (not(row_set)):\n ret = aux._EXIT_FAILURE\n\n print(__name__ + aux._COLON_SPACE_SEP + aux._ERROR_PREFIX\n + aux._COLON_SPACE_SEP + aux._ERROR_NO_DATA)\n\n return ret\n\n # ---------------------------------------------------------------------\n # --- Debug output - Begin --------------------------------------------\n # ---------------------------------------------------------------------\n dbg_output = PrettyTable(hdr_set)\n\n # Populating table rows.\n # Note: For PostgreSQL and SQLite databases the following simple loop\n # between dash separators is quite sufficient,\n # but for MySQL database it needs to decode\n # row_set cells.\n i = 0\n\n # ---------------------------------------------------------------------\n if (not(mysql)):\n # Simply traversing through row_set rows.\n while (i < len(row_set)):\n dbg_output.add_row(row_set[i])\n\n i += 1\n # ---------------------------------------------------------------------\n else:\n # Traversing through row_set rows with cells post-processing.\n while (i < len(row_set)):\n row_ary = row_set[i]\n\n j = 0\n\n # Decoding row_set cells.\n while (j < len(hdr_set)):\n if ((j != 4) and (j != 5)):\n row_ary[j] = row_ary[j].decode()\n\n j += 1\n\n 
dbg_output.add_row(row_ary)\n\n i += 1\n\n # Left-aligning table columns.\n dbg_output.align=\"l\"\n\n print(dbg_output)\n\n print(str(len(row_set)) + self._ROWS_IN_SET_FOOTER + aux._NEW_LINE)\n # ---------------------------------------------------------------------\n # --- Debug output - End ----------------------------------------------\n # ---------------------------------------------------------------------\n\n time.sleep(1) # <== Waiting one second... just for fun... :-)... -- OK.\n\n # ---------------------------------------------------------------------\n # --- Generating the PDF report - Begin -------------------------------\n # ---------------------------------------------------------------------\n pdf_report_path = self._get_pdf_report_path(__file__, aux)\n\n report = canvas.Canvas(pdf_report_path,\n pagesize=A4, # <== 210 x 297 mm.\n pdfVersion=(1, 4), # <== PDF version 1.4.\n # --- Page boxes ------------------------------------------------------\n# cropBox=( (10 / self.MM), (10 / self.MM), (200 / self.MM), (287 / self.MM)),\n# artBox=( (15 / self.MM), (15 / self.MM), (195 / self.MM), (282 / self.MM)),\n# trimBox=((210 / self.MM), (297 / self.MM) ),\n#bleedBox=( (5 / self.MM), (5 / self.MM), (205 / self.MM), (292 / self.MM))\n )\n\n # --- Report metadata -------------------------------------------------\n report.setTitle (self._REPORT_TITLE )\n report.setAuthor (self._REPORT_AUTHOR )\n report.setSubject (self._REPORT_SUBJECT )\n report.setKeywords(self._REPORT_KEYWORDS)\n report.setCreator (self._REPORT_CREATOR )\n\n # --- Page body (data) x MAX_PAGES ------------------------------------\n i = 0\n\n while (i < self.MAX_PAGES):\n ret = self._page_body_draw(report, hdr_set, row_set)\n\n if (ret == aux._EXIT_FAILURE):\n print(__name__ + aux._COLON_SPACE_SEP+aux._ERROR_PREFIX\n + aux._COLON_SPACE_SEP+aux._ERROR_NO_REPORT_GEN)\n\n return ret\n\n report.showPage()\n\n i += 1\n\n # Trying to save the report.\n try:\n report.save()\n except Exception as e:\n ret = aux._EXIT_FAILURE\n\n print(__name__ + aux._COLON_SPACE_SEP + aux._ERROR_PREFIX\n + aux._COLON_SPACE_SEP + str(e))\n\n return ret\n\n print(self._PDF_REPORT_SAVED_MSG + aux._COLON_SPACE_SEP\n + pdf_report_path)\n # ---------------------------------------------------------------------\n # --- Generating the PDF report - End ---------------------------------\n # ---------------------------------------------------------------------\n\n return ret", "def create_sheet(self):\n workbook = xlwt.Workbook()\n borders = Borders()\n header_border = Borders()\n header_border.left,header_border.right,header_border.top,header_border.bottom = Borders.THIN,Borders.THIN,Borders.THIN,Borders.THICK\n borders.left,borders.right,borders.top,borders.bottom = Borders.THIN,Borders.THIN,Borders.THIN,Borders.THIN\n header_bold = xlwt.easyxf(\"font: bold on, height 200; pattern: pattern solid, fore_colour gray25;alignment: horizontal center ,vertical center\")\n header_bold.borders=header_border\n body_style = xlwt.easyxf(\"font: height 200; alignment: horizontal left\")\n body_style.borders=borders\n \n ## style for different colors in columns\n xlwt.add_palette_colour(\"light_blue_21\", 0x21)\n workbook.set_colour_RGB(0x21, 153, 255, 255) \n qty_cell_style = xlwt.easyxf(\"font: height 200,bold on, name Arial; align: horiz right, vert center; pattern: pattern solid, fore_colour light_blue_21; borders: top thin,right thin,bottom thin,left thin\")\n \n xlwt.add_palette_colour(\"custom_orange\", 0x22)\n workbook.set_colour_RGB(0x22, 255, 204, 153)\n 
value_style = xlwt.easyxf(\"font: height 200,bold on, name Arial; align: horiz right, vert center; pattern: pattern solid, fore_colour custom_orange; borders: top thin,right thin,bottom thin,left thin\")\n \n xlwt.add_palette_colour(\"custom_mandys_pink\", 0x20)\n workbook.set_colour_RGB(0x20, 246, 228, 204)\n value_style2 = xlwt.easyxf(\"font: height 200,bold on, name Arial; align: horiz right, vert center; pattern: pattern solid, fore_colour custom_mandys_pink; borders: top thin,right thin,bottom thin,left thin\")\n \n \n xlwt.add_palette_colour(\"custom_yellow\", 0x25)\n workbook.set_colour_RGB(0x25, 255, 255, 179)\n blank_cell_style = xlwt.easyxf(\"font: height 200,bold on, name Arial; align: horiz center, vert center; pattern: pattern solid, fore_colour custom_yellow; borders: top thin,right thin,bottom thin,left thin\")\n return workbook,header_bold,body_style,qty_cell_style,value_style,blank_cell_style,value_style2", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_268_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def printFrame(self,outfile=None,sheet=None,filename=None,rowoffset=0,empty=\"\",delimiter=','):\n# \t\tprint outfile,filename,sheet\n# \t\tsys.exit()\n\n\t\tstyle=xlwt.XFStyle()\t\n\t\tstyle.num_format_str = '#0.00'\n\n\t\trows = [self.header]\n\t\tfor d in self.data:\n\t\t\trow = []\n\t\t\tfor h in self.header:\n\t\t\t\ttry:\n\t\t\t\t\trow.append(d[h])\n\t\t\t\texcept KeyError:\n\t\t\t\t\trow.append(empty)\n\t\t\trows.append(row)\n\n\n\t\tif outfile == None:\n\t\t\treturn rows\n\t\telif isinstance(outfile,xlwt.Workbook) and sheet and filename:\n\t\t\tif isinstance(sheet,xlwt.Worksheet):\n\t\t\t\tws=sheet\n\t\t\telse:\n\t\t\t\tws = outfile.add_sheet(sheetname)\n\t\t\tfor ri, row in enumerate(rows):\n\t\t\t\tfor ci, val in enumerate(row):\n# \t\t\t\t\tval = unicode(val).encode(\"utf8\")\n\t\t\t\t\t#ws.write(ri+rowoffset,ci,val,xlwt.easyxf(num_format_str='#0.00'))\n\t\t\t\t\tif ci >= 1 and ri >= 1:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tval = float(val)\n\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\tws.write(ri+rowoffset,ci,val,style)\n\t\t\toutfile.save(filename[:-3]+'xls')\n\t\telse:\n\t\t\twr = csv.writer(open(outfile,'w'),delimiter=delimiter)\n\t\t\twr.writerows(rows)", "def create_template(path_string) :\r\n today = datetime.now()\r\n today = today.strftime('%y%y%m%d%H%M%S')\r\n # print(today)\r\n temp_path = os.path.join(path_string, today)\r\n # temp_path = today\r\n # Create a workbook and add a worksheet.\r\n workbook = xlsxwriter.Workbook(f'{temp_path}.xlsx')\r\n worksheet0 = workbook.add_worksheet('ATR') # Defaults to Sheet1.\r\n worksheet1 = workbook.add_worksheet('ESS') # Data.\r\n worksheet2 = workbook.add_worksheet('Statistics') # Defaults to Sheet\r\n\r\n # Some data we want to write to the worksheet.\r\n Tests_List = ['Temp', 'SN', 'Output Power @ P1dBCP', 
'Output Power Control Range/Resolution, FWD PWR Ind',\r\n 'Output IP3', 'LO Carrier Leakage', 'Sideband Suppression',\r\n 'Frequency Accuracy and Stability', 'A1 - Noise Figure vs. Gain', 'A1 - Gain variability',\r\n 'A1 - Image Suppression vs. Gain', 'Spurious',\r\n 'A2 - Noise Figure vs. Gain', 'A2 - Gain variability', 'A2 - Image Suppression vs. Gain',\r\n 'Average Power Consumption', 'Input Voltage', 'Digital Tests'\r\n ]\r\n\r\n # Start from the first cell. Rows and columns are zero indexed.\r\n row = 0\r\n # col = 0\r\n\r\n # Iterate over the data and write it out row by row.\r\n for index in range(3) :\r\n for i in range(len(Tests_List)) :\r\n worksheet0.write(row, i, Tests_List[i])\r\n worksheet1.write(row, i, Tests_List[i])\r\n worksheet2.write(row, i, Tests_List[i])\r\n # col += 1\r\n\r\n workbook.close()\r\n\r\n return today, temp_path", "def print_report_pdf(self):\n self.ensure_one()\n return self.env.ref('eliterp_sale_reports.action_report_product_catalogue').report_action(self)", "def print_stock_rotation_report(self):\n warehouses = False\n locations = False\n from_date = False\n to_date = False\n active_id = self.ids[0]\n today=datetime.now().strftime(\"%Y-%m-%d\")\n f_name = 'Stock Rotation Report' + ' ' + today\n stock_warehouse_obj = self.env['stock.warehouse']\n stock_locations_obj = self.env['stock.location']\n product_obj = self.env['product.product']\n \n if self.filtaration == 'warehouse':\n if not self.include_all_warehouse:\n if not self.warehouse_ids:\n raise ValidationError(\"please select the Warehouse.\")\n warehouses = self.warehouse_ids\n else:\n warehouses = stock_warehouse_obj.search([])\n else:\n if not self.include_all_location:\n if not self.location_ids:\n raise ValidationError(\"please select the Locations.\")\n locations = self.location_ids\n else:\n locations = stock_locations_obj.search([('usage','=','internal')])\n\n\n if not self.from_date:\n raise ValidationError(\"please select the From Date.\")\n \n if not self.to_date:\n raise ValidationError(\"please select the To Date.\")\n\n all_products = product_obj.with_context(active_test=True).search([('type','=','product')])\n from_date = self.from_date\n to_date = self.to_date\n \n date_1 = time.strptime(from_date, \"%Y-%m-%d\")\n date_2 = time.strptime(to_date, \"%Y-%m-%d\")\n if not (date_1 <= date_2):\n raise ValidationError(\"Fromdate is not previous then Todate\")\n self.get_stock_rotation_report(from_date,to_date,warehouses,locations,all_products)\n if self.datas:\n return {\n 'type' : 'ir.actions.act_url',\n 'url':'web/content/?model=stock.rotation.report&download=true&field=datas&id=%s&filename=%s.xls'%(active_id,f_name),\n 'target': 'new',\n }", "def create_xlsx(request):\n\n date_dict = income_date_parser(request)\n\n income_history = get_incomes_funds_ids(user_id=date_dict['user_id'],\n date_start=date_dict['start_date'],\n date_end=date_dict['finish_date'],\n time_diff=date_dict['utc_difference'])\n del income_history[-1]\n\n output, worksheet, workbook, formats_dict = creating_empty_xlsx_file()\n\n if income_history:\n head_row, head_col = 1, 1\n row, col = 2, 1\n for i in income_history[0]:\n if i != 'income_history_id':\n worksheet.write(head_row, head_col, i, formats_dict['head_format'])\n head_col += 1\n\n for history_dict in income_history:\n worksheet.write(row, col, history_dict['income'], formats_dict['cell_format'])\n worksheet.write(row, col + 1, history_dict['fund'], formats_dict['cell_format'])\n date = datetime.datetime.strptime(history_dict['date'], \"%Y-%m-%d\")\n 
worksheet.write_datetime(row, col + 2, date, formats_dict['date_format'])\n worksheet.write_number(row, col + 3, history_dict['amount'],\n formats_dict['value_format'])\n worksheet.write(row, col + 4, history_dict['comment'], formats_dict['cell_format'])\n col, row = 1, row + 1\n\n workbook.close()\n\n response = file_streaming_response \\\n ('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n 'income_history.xlsx', output)\n return response", "def generate_excel(structure:dict, output:str):\t\n\n\tstructure_columns = identify_columns(structure)\n\n\tworkbook = xlsxwriter.Workbook(output)\n\tworksheet = workbook.add_worksheet()\n\n\tcol = 0\n\tfor column in structure_columns:\n\t\tworksheet.write(0, col, column)\n\t\tcol += 1\n\n\trow = 1\n\tfor day in structure['data']:\n\t\tfor key in day.keys():\n\t\t\tif isinstance(day[key], list):\n\t\t\t\tworksheet.write(row, structure_columns.index(key), ', '.join(day[key]))\n\t\t\telif isinstance(day[key], dict):\n\t\t\t\tworksheet.write(row, structure_columns.index(key), str(day[key]))\n\t\t\telse:\n\t\t\t\tworksheet.write(row, structure_columns.index(key), day[key])\n\t\trow += 1\n\t\n\tworksheet.freeze_panes(1, 1)\n\tworkbook.close()", "def write_xlsx(data):\n workbook = xlsxwriter.Workbook('MyWorkbook.xlsx')\n main_sheet = workbook.add_worksheet('MySheet')\n\n date_format = workbook.add_format(\n {'num_format': 'mm/dd/yy hh:mm:ss AM/PM'})\n length = str(len(data) + 1)\n \n main_sheet.add_table(('A1:D' + length), \n {'data': data,\n 'columns': [{'header': 'Department'}, {'header': 'Students'},\n {'header': 'Cumulative GPA'},\n {'header': 'Final Date',\n 'format': date_format}]})\n\n department_grades = workbook.add_chart({'type':'column'})\n department_grades.set_title(\n {'name':'Department and Grade distribution'})\n department_grades.add_series(\n {'categories':'=MySheet!$A$2:$A$5',\n 'values':'=MySheet!$C$2:$C$5'})\n main_sheet.insert_chart('A8', department_grades)\n workbook.close()", "def print_report_pdf(self):\n self.ensure_one()\n return self.env.ref('eliterp_sale_reports.action_report_product_sold').report_action(self)", "def excel_print(data1, data2, data3, data4, data5, data6):\r\n\r\n list_data = [data1, data2, data3, data4, data5, data6]\r\n name_list = ['Old elec', 'New elec', 'Old elec dup', 'New elec dup',\r\n 'Diff After Strip', 'New Elec Before Strip']\r\n zipped = zip(list_data, name_list)\r\n excel_writer = pd.ExcelWriter('elec_delta2.xlsx', engine='xlsxwriter')\r\n for data, name in zipped:\r\n data.to_excel(excel_writer, sheet_name=name,\r\n index=False, freeze_panes=(1, 0))\r\n num_cols = len(list(data))\r\n worksheet = excel_writer.sheets[name]\r\n worksheet.autofilter(0, 0, 0, num_cols-1)\r\n worksheet.set_column(0, 0, 23.56)\r\n worksheet.set_column(1, 1, 34.89)\r\n excel_writer.save()", "def report_table(self, filename='ODH_report'):\n table = []\n header = ['Source', 'Failure', 'Event failure rate, 1/hr', '# of',\n 'Total failure rate, 1/hr', 'Leak rate, SCFM',\n '# fans working', 'Fan rate, SCFM', 'Event duration, min',\n 'Oxygen concentration', 'Fatality prob', 'Case prob',\n 'Fatality rate, 1/hr']\n # 'Total failure rate', 'ODH protection PFD', 'Building is powered'\n table.append(header)\n self.fail_modes.sort(key=lambda x: x.source.name)\n for f_mode in self.fail_modes:\n table.append([\n f_mode.source.name,\n f_mode.name,\n (f_mode.leak_fr/f_mode.N).m_as(1/ureg.hr),\n f_mode.N,\n f_mode.leak_fr.m_as(1/ureg.hr),\n f_mode.q_leak.m_as(ureg.ft**3/ureg.min),\n f_mode.N_fan,\n 
f_mode.Q_fan.m_as(ureg.ft**3/ureg.min),\n f_mode.tau.m_as(ureg.min),\n f_mode.O2_conc,\n f_mode.F_i,\n f_mode.P_i/f_mode.leak_fr,\n f_mode.phi.m_as(1/ureg.hr)])\n filename += '.xlsx'\n with xlsxwriter.Workbook(filename) as workbook:\n header_format = workbook.add_format({'bold': True,\n 'font_size': 12,\n 'bottom': 3})\n worksheet = workbook.add_worksheet()\n col_width = [len(x) for x in table[0]]\n for row_n, row in enumerate(table):\n for col_n, data in enumerate(row):\n worksheet.write(row_n, col_n, data)\n if col_n in (0, 1, 10):\n # For source names, failure names\n # and 'Total failure rate'\n col_width[col_n] = max(col_width[col_n], len(str(data)))\n sci_format = workbook.add_format({'num_format': '0.00E+00'},)\n flow_format = workbook.add_format({'num_format': '#'},)\n percent_format = workbook.add_format({'num_format': '0%'},)\n number_format = workbook.add_format({'num_format': '0'},)\n worksheet.set_row(0, None, header_format)\n worksheet.set_column(2, 2, None, sci_format)\n worksheet.set_column(4, 4, None, sci_format)\n worksheet.set_column(5, 5, None, flow_format)\n worksheet.set_column(8, 8, None, sci_format)\n worksheet.set_column(9, 9, None, percent_format)\n worksheet.set_column(10, 12, None, sci_format)\n # Writing total/summary\n N_rows = len(table)\n N_cols = len(table[0])\n worksheet.write(N_rows+1, N_cols-2, 'Total fatality rate, 1/hr')\n worksheet.write(N_rows+1, N_cols-1,\n self.phi.m_as(1/ureg.hr))\n worksheet.write(N_rows+2, N_cols-2, 'ODH class')\n worksheet.write(N_rows+2, N_cols-1, self.odh_class(),\n number_format)\n # Autofit column width\n for col_n, width in enumerate(col_width):\n adj_width = width - 0.005 * width**2\n worksheet.set_column(col_n, col_n, adj_width)\n # Adding usability\n worksheet.conditional_format(\n 1, N_cols-1, N_rows-1, N_cols-1,\n {'type': '3_color_scale', 'min_color': '#008000',\n 'max_color': '#FF0000'})\n worksheet.freeze_panes(1, 0)", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_10yr_average_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_tenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def run(self):\n report_file = self.get_report_file_name()\n self.export_records_to_file(report_file)\n print(\"Report file ({}) generated.\".format(report_file))", "def report(self, tsd, output_folder, basename):\n if self.testing:\n from cea.utilities import reporting\n reporting.full_report_to_xls(tsd, output_folder, basename, self)", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_auth_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n 
filename = 'bushfire_indicator_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def generate_report(self) -> Report:\n # equity_curve = self._generate_equity_curve()\n # summary_stats = self._generate_summary_stats(equity_curve)\n # return Report(equity_curve, summary_stats)\n pass", "def report(self, **options):\n pass", "def generate_xml_report(self, parser, data, objects,context=None):\n raise NotImplementedError()", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def excel_out(employees_dict, path):\n # Create workbook and worksheet\n try:\n workbook = xlsxwriter.Workbook(path)\n except:\n return False\n worksheet = workbook.add_worksheet(name='Прокуратура')\n # Add format to workbook\n format_headers_po = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 14,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFCA28',\n 'border': 2})\n format_headers_department = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 13,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFD54F',\n 'border': 2})\n format_headers_division = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFE082',\n 'border': 2})\n format_header = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFF59D',\n 'border': 2})\n employee_format_b = workbook.add_format( {'align': 'left',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'border': 2})\n employee_format = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'border': 2})\n format_attribute = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'font_size': 10,\n 'font_name': 'Times New Roman',\n 'border': 1})\n\n # Set width of columns and height of rows\n worksheet.set_default_row(40, False)\n worksheet.set_column(0, 0, 5)\n worksheet.set_column(1, 1, 25)\n worksheet.set_column(2, 2, 21)\n worksheet.set_column(3, 3, 21)\n worksheet.set_column(4, 4, 21)\n\n # Begin from row\n row = 0\n\n # Parser for employees dictionary\n for po in employees_dict:\n # Прокуратура\n worksheet.merge_range(row, 0, row, 4, data=po.name, cell_format=format_headers_po)\n row += 1\n # Атрибуты Прокуратуры\n row = add_attribute(po, worksheet, row, format_attribute)\n # Header\n row = add_header(worksheet, row, format_header)\n # Работники Прокуратуры\n if 'employees' in employees_dict[po]:\n for num, employee in enumerate(employees_dict[po]['employees'], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n\n # Управление\n if 'departments' in employees_dict[po]:\n 
for department in employees_dict[po]['departments']:\n worksheet.merge_range(row, 0, row, 4, data=department.name, cell_format=format_headers_department)\n row += 1\n # Атрибуты Управления\n row = add_attribute(department, worksheet, row, format_attribute)\n # Работники Управления\n if 'employees' in employees_dict[po]['departments'][department]:\n for num, employee in enumerate(employees_dict[po]['departments'][department]['employees'], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n # Отдел Управления\n if 'divisions' in employees_dict[po]['departments'][department]:\n for division in employees_dict[po]['departments'][department]['divisions']:\n worksheet.merge_range(row, 0, row, 4, data=division.name, cell_format=format_headers_division)\n row += 1\n # Атрибуты Отдела\n row = add_attribute(division, worksheet, row, format_attribute)\n # Работники Отдела\n for num, employee in enumerate(employees_dict[po]['departments'][department]['divisions'][division], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n\n # Отдел Прокуратуры\n if 'divisions' in employees_dict[po]:\n for division in employees_dict[po]['divisions']:\n worksheet.merge_range(row, 0, row, 4, data=division.name, cell_format=format_headers_division)\n row += 1\n # Атрибуты Отдела\n row = add_attribute(division, worksheet, row, format_attribute)\n # Работники Отдела\n for num, employee in enumerate(employees_dict[po]['divisions'][division], 1):\n row += add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n try:\n workbook.close()\n except:\n return False\n return True", "def GenerateReport(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('generateReport', payload=payload, response_object=None)", "def export(self):\n rpt_date = datetime.now()\n filename = 'bushfire_regionbytenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def _gen_report(self):\n print \"------------------------------------------\"\n print \"fio report\"\n print \"------------------------------------------\"\n print \"name\", \" \".join(f for f in FIELDS)\n # print fields\n for name in sorted(self.reports):\n report = self.reports[name]\n #print report\n print name, \" \".join(str(report.get(f)) for f in FIELDS)\n\n print \"*******************************************\"\n # print clats\n index = 0\n for name in sorted(self.reports):\n report = self.reports[name]\n if index == 0:\n print \"clat_percent\", \" \".join(\n str(c[0]) for c in report[\"clats\"])\n print name, \" \".join(str(c[1]) for c in report[\"clats\"])\n index += 1", "def printing():\r\n document.add_heading('Printing Service details', 1)\r\n\r\n printing_metrics = ['customproperties',\r\n 'workingSetSizeHiPct',\r\n 'logVerbosityAuditActivity',\r\n 'logVerbosityService',\r\n 'hostname',\r\n 'tags']\r\n\r\n printnodes = get_qlik_sense.get_printing()\r\n num_of_nodes = len(printnodes)\r\n num_of_print_metrics = len(printing_metrics)\r\n table = document.add_table(rows=num_of_print_metrics+1, cols=num_of_nodes+1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Metric'\r\n for item in range(0, num_of_nodes):\r\n row.cells[item+1].text = 
printnodes[item][6]\r\n for item in range(num_of_print_metrics):\r\n row = table.rows[item+1]\r\n row.cells[0].text = str(printing_metrics[item])\r\n for printnode in range(num_of_nodes):\r\n row.cells[printnode+1].text = str(printnodes[printnode][item])\r\n\r\n document.add_page_break()", "def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n self.last_5digit,\\\n self.charset)", "def generate_report(release, spec, report_week):\n\n logging.info(u\"Generating the report ...\")\n\n report = {\n u\"html\": generate_html_report,\n u\"pdf\": generate_pdf_report\n }\n\n for report_format in spec.output[u\"format\"]:\n report[report_format](release, spec, report_week)\n\n archive_input_data(spec)\n\n logging.info(u\"Done.\")", "def generate_report_for_paper(self):\n paper_report = self.metrics_calculator.report_metrics(report_type=\"paper\")\n class_numbers = sorted(self.idx2labelname_mapping.keys(), reverse=False)\n row_names = [\n f\"class_{class_num} - ({self.idx2labelname_mapping[class_num]})\"\n for class_num in class_numbers\n ]\n row_names.extend([f\"Micro-Fscore\", f\"Macro-Fscore\"])\n return paper_report, row_names", "def print_quick_report():\r\n print('function not yet written')\r\n # print a summary of the report as a structured pandas dataframe\r\n #Summary will include only date title and sentiment\r", "def print_report():\n width = 68\n print(\"-\" * width)\n header = (\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\")\n print(\"{:20} | {:15} | {:10} | {:12}\".format(*header))\n print(\"-\" * width)\n for index, donor in enumerate(donors_data):\n name = donor[\"name\"]\n total = sum(donor[\"donations\"])\n num_gift = len(donor[\"donations\"])\n average = total/num_gift\n print(\"{:22} ${:12,.2f} {:12d} ${:12,.2f}\".format(name, total, num_gift, average ))\n print(\"-\" * width)", "def toExcel(self, outFileName):\n workbook = Workbook(outFileName, {'constant_memory': True})\n workbook.use_zip64() # allow large size Excels just in case\n\n wks = workbook.add_worksheet('Distribution Fitting')\n hdrFmt = workbook.add_format({'bold' : True,\n 'underline' : True,\n 'align' : 'center'})\n resultFormats = [workbook.add_format({'num_format' : fmtStr}) \\\n for fmtStr in ['0.000000', '0.0000%']]\n\n row = 0\n wks.set_column(0, 0, 11)\n wks.set_column(1, 1, 8, resultFormats[0])\n wks.set_column(2, 2, 10.6, resultFormats[1])\n for col, headerName in enumerate(self.getHeaderList()):\n wks.write_string(row, col, headerName, hdrFmt)\n\n for distrName, (results, params) in self.result.iteritems():\n row += 1\n col = 0\n wks.write_string(row, col, distrName)\n for col, (result, outFormat) in \\\n enumerate(itertools.izip(results, resultFormats), col+1):\n wks.write_number(row, col, result, outFormat)\n for col, paramValue in enumerate(params, col+1):\n wks.write_number(row, col, paramValue)\n\n workbook.close()", "def create_report(self):\n # Base setup\n line_out = ''\n line_out += \"{:<15} | {:^15} | {:^30}\\n\".format(\"Name\", \"Donations\", \"Email\")\n line_out += (\"-\"*65)\n print(line_out)\n\n # Setup line format to recieve ordered donor info \n for name in self.all_donors:\n line = \"{:<15} | {:^15} | {:^30}\".format(name, self.r.hget(name, 'donations'), 
self.r.hget(name, 'email'))\n print(line)", "def export_to_excel(self, worksheet, row_start, col_start, queryset, date_time=timezone.now()):\n\t\tif queryset:\n\t\t\t[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)\n\t\t\tfor q in queryset:\n\t\t\t\t# object_excel_write function---date_time uyiin history objectiig excel -ruu horvuulne\n\t\t\t\t[row_write, col_write] = q.object_excel_write(worksheet, row_write, col_write, date_time=date_time)\n\t\telse:\n\t\t\tworksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')", "def write_excel(self, filename):\n writer = pd.ExcelWriter(filename)\n self.df_avg.to_excel(writer, 'Simulation')\n self.manager_df.to_excel(writer, 'FleetManagers')\n self.customer_df.to_excel(writer, 'Customers')\n self.transport_df.to_excel(writer, 'Transports')\n writer.save()", "def dataframe_to_excel(df, sheet_title, project_constants_lst, \n current_date=str(date.today()), force_flag = False, freeze_column='A'):\n \n project_steps_df, max_title, _, report_requisites_sr, *_ = project_constants_lst\n report_type, export_flag, df_decription = project_steps_df.loc[sheet_title, ['report_type', 'export_to_excel', 'description']].values\n \n # check DataFrame report type to save\n if report_type == 'report':\n report_mark = report_requisites_sr['project_title'] + '_tables'\n else:\n report_mark = report_type\n \n # construct excel filename\n file_name = report_requisites_sr['customer_name'] + '_' + report_mark + '_' + current_date + '.xlsx'\n\n # information string\n info = f'Exporting {sheet_title} table to {report_mark} file'\n print(info, end =\" \")\n file_path = os.path.join(report_requisites_sr['today_report_folder'], file_name)\n \n # save DataFrame to excel file if export_to_excel trigger is ON\n # and DataFrame is not empty\n if (force_flag or export_flag) and not df.empty:\n fsop.create_folder(report_requisites_sr['today_report_folder'], max_title, display_status=False)\n file_mode = 'a' if os.path.isfile(file_path) else 'w'\n df = df.apply(pd.to_numeric, errors='ignore')\n try:\n if_sheet_exists_param = 'replace' if file_mode == 'a' else None\n content_df, item_exist = generate_table_of_contents(file_path, file_mode, sheet_title, df_decription)\n df_flat = drop_multindex(df)\n # write table of contents and data dataframe to the excel file\n with pd.ExcelWriter(file_path, mode=file_mode, if_sheet_exists=if_sheet_exists_param, engine='openpyxl') as writer:\n if file_mode == 'w' or not item_exist:\n content_df.to_excel(writer, sheet_name='Содержание', index=False)\n df_flat.to_excel(writer, sheet_name=sheet_title, startrow=2, index=False)\n # format table of contents and data worksheets\n workbook = openpyxl.load_workbook(file_path)\n format_workbook(workbook, sheet_title, df_decription, freeze_column)\n workbook.save(file_path)\n except PermissionError:\n status_info('fail', max_title, len(info))\n print('\\nPermission denied. 
Close the file.\\n')\n sys.exit()\n else:\n status_info('ok', max_title, len(info))\n return file_path \n else:\n # if save key is on but DataFrame empty\n if project_steps_df.loc[sheet_title, 'export_to_excel'] and df.empty:\n status_info('no data', max_title, len(info))\n else: \n status_info('skip', max_title, len(info))\n return None", "def create_output_file(self):\r\n self.output_file = openpyxl.Workbook()", "def report(self, fileName = None):\n\n header = ARCPY.GetIDMessage(84200)\n columns = [ARCPY.GetIDMessage(84191), ARCPY.GetIDMessage(84201), \n ARCPY.GetIDMessage(84202)]\n results = [ columns ]\n for case in self.uniqueCases:\n if not self.caseField:\n strCase = \"ALL\"\n else:\n strCase = UTILS.caseValue2Print(case, self.caseIsString)\n cfOIDs, minSumDist = self.cf[case]\n cfOIDs = [ str(i) for i in cfOIDs ]\n cfOIDs = \", \".join(cfOIDs)\n rowResult = [ strCase, \n cfOIDs,\n LOCALE.format(\"%0.6f\", minSumDist) ]\n results.append(rowResult)\n\n outputTable = UTILS.outputTextTable(results, header = header)\n if fileName:\n f = UTILS.openFile(fileName, \"w\")\n f.write(outputTable)\n f.close()\n else:\n ARCPY.AddMessage(outputTable)", "def create_report(m_df: DataFrame, u_df: DataFrame, server: str='JEFF', ID: str='11', date=datetime.now().strftime('%Y%m%d %H%M%S')):\r\n m_df = prepare_output_df(m_df, 'm')\r\n u_df = prepare_output_df(u_df, 'u')\r\n\r\n with pd.ExcelWriter(f'{BASE_DIR}/report_{server}_{ID}_{date}.xlsx') as o:\r\n m_df.to_excel(o, sheet_name='Matched', index=False)\r\n u_df.to_excel(o, sheet_name='Unmatched', index=False)", "def print_report(self, stream):\n stream.write(ET.tostring(self.xml()))", "def get_stock_rotation_report(self,from_date,to_date,warehouses,locations,all_products):\n workbook,header_bold,body_style,qty_cell_style,value_style,blank_cell_style,value_style2=self.create_sheet()\n workbook,sheet_data,row_data = self.add_headings(from_date,to_date,warehouses,locations,workbook,header_bold,body_style,qty_cell_style,value_style,blank_cell_style)\n if locations:\n data_dict = self.prepare_data_with_location(from_date, to_date, locations, all_products)\n else:\n data_dict = self.prepare_data_with_warehouse(from_date, to_date, warehouses, all_products)\n product_data_dict = self.print_date_in_sheet(data_dict,workbook,sheet_data,row_data,body_style,qty_cell_style,value_style,blank_cell_style,value_style2)\n workbook,worksheet_all_stock_rotation = self.create_all_inventory_sheet(from_date,to_date,workbook,sheet_data,row_data,body_style,qty_cell_style,value_style,blank_cell_style,header_bold)\n self.print_all_rotation_sheet_data(product_data_dict,workbook,worksheet_all_stock_rotation,body_style,qty_cell_style,value_style,blank_cell_style,value_style2)\n fp = BytesIO() \n workbook.save(fp)\n fp.seek(0)\n report_date = base64.encodestring(fp.read())\n fp.close()\n self.write({'datas':report_date})\n return True", "def export_excel(self, filename):\n # convert table to array of rows\n rows = [self.headings]\n for y in range(self.rowcount):\n row = []\n for h in self.headings:\n row.append(self.table[h][y])\n rows.append(row)\n \n sheet = pyexcel.Sheet(rows, self.name, name_columns_by_row=0)\n sheet.save_as(filename)", "def pretty_print_report(self, report, header):\n\n\t\tfor item in header:\n\t\t\tprint('|{:<15}'.format(item), end='')\n\t\tprint('|')\n\n\t\tfor row in report:\n\t\t\tfor column in row:\n\t\t\t\tprint('|{:<15}'.format(column), end='')\n\t\t\tprint('|')", "def report(self, output_dir):", "def generate_report():\n\n # Fetch the top 3 most 
viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)", "def save_xls(self,basepath=''): \n self.generate_xls()\n self.wb.save(basepath+self.filename+'.xls')", "def create_report():\n names, totals, num_gifts, avg_gift = get_donor_summary(donors)\n print(f\"Donor Name{'':<20} | Total Given{'':>0} | Num Gifts{'':>0} | Average Gift{'':>0}\")\n print(f\"-\" * 72)\n for name, total, num_gift, avg_gift in zip(names, totals, num_gifts, avg_gift):\n print(f\"{name:<32}${total:>11}{num_gift:>12} ${avg_gift:>13}\")\n return None", "def run_report():\n data_lines = data_base.report_list\n sizes = [0] * len(data_lines[0])\n for line in data_lines:\n for item_index, item in enumerate(line):\n if len(item) > sizes[item_index]:\n sizes[item_index] = len(item)\n\n for line in data_lines:\n print(line[0].ljust(sizes[0]) + \" $ \" + line[1].ljust(sizes[1]) + \" \" + line[2].rjust(\n sizes[2]) + \" \\t \" + line[3].rjust(sizes[3]))\n print(\"\\n\")", "def generate_report(df, start_date, end_date):\n # Remove any transactions that had to do with collecting or returning security\n security_df = df[(df[CATEGORY] == 'Security') | (df[CATEGORY] == 'Security-Income')]\n df = df[(df[CATEGORY] != 'Security')]\n\n # Exclude the data for everything except our quarter\n period_data = df.loc[start_date:end_date] # Note: NOT using extended quarter range\n rental_income = period_data[period_data[CATEGORY] == 'Rent']\n utilities = period_data[(period_data[CATEGORY] == 'Utilities')]\n other_income = period_data[(period_data['Transaction Type'] == 'credit') & (period_data[CATEGORY] != 'Rent')]\n expenses = period_data[(period_data['Transaction Type'] == 'debit')]\n unpaid_util_overages = float(0)\n\n # print(rental_income)\n # print(other_income)\n # print(expenses)\n \n html_config.initialize()\n print(html_config.HTML_OPEN)\n\n print('<H1>Income and Expense Report for %s-%s:' % (start_date, end_date), '</H1><p>')\n\n # List all unit specific rents and expenses for the quarter\n for UNIT in sorted(rental_income['Unit'].unique()):\n # Show rental income info\n temp_df = rental_income[rental_income['Unit'] == UNIT]\n print('<br><H2>Total rent for Unit ', UNIT, ': ${:,.2f}'.format(temp_df['Amount'].sum()), '</H2>')\n print(temp_df[['Description', 'Amount']].to_html())\n \n if not SKIP_UTIL_ANALYSIS:\n # Show utilities payments and calculate any overage due\n temp_df = utilities[(utilities['Unit'] == UNIT) & (utilities['Transaction Type'] == 'debit')]\n print('<br><H2>Utilities Expenses for Unit', UNIT, ': ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n overage = temp_df.assign(Overage=lambda x: x.Amount - limit_df.loc[UNIT].Amount)\n # Disable warning when setting negative overage values to zero\n pd.set_option('mode.chained_assignment', None)\n overage.Overage[overage.Overage < 0] = 0\n pd.set_option('mode.chained_assignment', 'warn')\n print(overage[['Description', 'Amount', 'Overage']].to_html())\n # Show if utilties costs exceeded allotted amount\n if (overage['Overage'].sum() > 0):\n print('<h3>Utilities overage for unit', UNIT, ': 
${:,.2f}'.format(overage['Overage'].sum()), '</h3>')\n unpaid_util_overages += overage['Overage'].sum()\n # Show any untilities that were collected \n overage_collected = utilities[(utilities['Unit'] == UNIT) & (utilities['Transaction Type'] == 'credit')]\n if not overage_collected.empty:\n print('<br><H2>Util overages collected for Unit ', UNIT, ': ${:,.2f}'.format(overage_collected['Amount'].sum()), '</H2>')\n print(overage_collected[['Description', 'Amount', CATEGORY]].to_html())\n print('<h3>Net Utils overage for unit', UNIT, 'for period: ${:,.2f}'.format(overage['Overage'].sum() - overage_collected['Amount'].sum()), '</h3>')\n \n\n \n # Generate unit specific Utility usage reports\n if GEN_TENANT_UTIL_REPORTS and OUTPUT_DIRECTORY:\n TENANT_FILE = '%s/122-Spring-St-%s-%s-Unit-%s-utils.html' % (OUTPUT_DIRECTORY, start_date, end_date, UNIT)\n TENANT_REPORTS.append(TENANT_FILE)\n sys.stdout = open(TENANT_FILE, 'w')\n print(html_config.HTML_OPEN)\n\n print('<H1>Unit', UNIT, '</H1>')\n print('<br><H2>Utilities Expenses for: %s-%s' % (start_date, end_date))\n print('<br>Utilites included in rent: ${:,.2f}'.format(limit_df.loc[UNIT].Amount))\n print('</H2>')\n print(overage[['Description', 'Amount', 'Overage']].to_html())\n # Show if any utilties overage may be due\n if (overage['Overage'].sum() > 0):\n print('<h3>Utilities overage for unit', UNIT, ': ${:,.2f}'.format(overage['Overage'].sum()), '</h3>')\n if not overage_collected.empty:\n print('<br><H2>Util overages collected for Unit ', UNIT, ': ${:,.2f}'.format(overage_collected['Amount'].sum()), '</H2>')\n print(overage_collected[['Description', 'Amount', CATEGORY]].to_html())\n print('<h3>Net Utils overage for unit', UNIT, 'for period: ${:,.2f}'.format(overage['Overage'].sum() - overage_collected['Amount'].sum()), '</h3>')\n\n print(html_config.HTML_CLOSE)\n\n # Restore stdout to the main report file\n sys.stdout = open(REPORT_FILE, 'a')\n \n # Show other unit specific transactions\n if SKIP_UTIL_ANALYSIS:\n unit_exp = expenses[(expenses['Unit'] == UNIT)]\n unit_income = other_income[other_income['Unit'] == UNIT]\n else:\n unit_exp = expenses[(expenses['Unit'] == UNIT) & (expenses[CATEGORY] != 'Utilities')]\n unit_income = other_income[(other_income['Unit'] == UNIT) & (other_income[CATEGORY] != 'Utilities')]\n \n if not unit_exp.empty:\n print('<br><H2>Other Unit specific expenses for: ', UNIT, ': ${:,.2f}'.format(unit_exp['Amount'].sum()), '</h2>')\n print(unit_exp[['Description', 'Amount', 'Unit', CATEGORY]].to_html())\n print('<p>')\n \n # Show any other unit specific credit\n other_income = other_income[other_income['Unit'] == UNIT]\n if not other_income.empty:\n print('<br><H2>Expense offsets for Unit ', UNIT, ': ${:,.2f}'.format(other_income['Amount'].sum()), '</H2>')\n print(other_income[['Description', 'Amount', CATEGORY]].to_html())\n \n # Add a line between units\n print('<hr>')\n \n # List the shared income and expenses for the quarter\n temp_df = other_income[other_income['Unit'].isnull()]\n if not temp_df.empty:\n print ('<br><H2>Non unit specific income: ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n print(temp_df[['Description', 'Amount', CATEGORY]].to_html())\n gen_expenses = expenses[expenses['Unit'].isnull()]\n if not gen_expenses.empty:\n print ('<br><H1>Non unit specific expenses</h1>')\n # Get the list of expense categories and generate summary for each\n for category in sorted(gen_expenses[CATEGORY].unique()):\n temp_df = gen_expenses[(gen_expenses[CATEGORY] == category)]\n print ('<br><H2>'+ category +': 
${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n print(temp_df[['Description', 'Amount', CATEGORY]].to_html())\n \n # If there were any security transactions in the period give a security report\n if not security_df.loc[start_date:end_date].empty:\n temp_df = security_df.loc[start_date:end_date] \n print('<hr><H2>Security related transactions:</H2>')\n print(temp_df[['Description', 'Amount', 'Transaction Type', 'Unit']].to_html())\n for UNIT in sorted(rental_income['Unit'].unique()):\n unit_df = security_df[security_df['Unit'] == UNIT]\n collected = unit_df[(unit_df['Transaction Type'] == 'credit')]['Amount'].sum()\n returned = unit_df[(unit_df['Transaction Type'] == 'debit')]['Amount'].sum()\n print('<center><H4>Current Liability on Unit '+str(UNIT)+': ${:,.2f}'.format(collected-returned), '</H4></center>')\n \n # # Summarize the periods income and expenses -- old way to be discarded...\n # print('<br><H3>Total income for period: ${:,.2f}'.format(rental_income['Amount'].sum()), '</H3>')\n # print('<H3>Total expense for period: ${:,.2f}'.format(expenses['Amount'].sum() - other_income['Amount'].sum()), '</H3>')\n # print('<H3>Profit for period: ${:,.2f}'.format(rental_income['Amount'].sum() + other_income['Amount'].sum() -expenses['Amount'].sum()), '</H3>')\n # Summarize the periods income and expenses\n print('<br><H3>Total income for period: ${:,.2f}'.format(rental_income['Amount'].sum()))\n print('<br><H3>Total expense for period: ${:,.2f}'.format(expenses['Amount'].sum() - other_income['Amount'].sum()), '</H3>')\n print('<H3>Profit for period: ${:,.2f}'.format(rental_income['Amount'].sum() + other_income['Amount'].sum() -expenses['Amount'].sum()), '</H3>')\n# print('</H3>')\n \n print(html_config.HTML_CLOSE)\n sys.stdout.flush()", "def excel_output(df):\n output = io.BytesIO()\n #time = str(date.today())\n #filename = \"output \"+time+\".xlsx\"\n writer = pd.ExcelWriter(output, engine='xlsxwriter', options={'remove_timezone': True})\n #writer.book.filename = io\n df.to_excel(writer,'Sheet1', index=False, header=True)\n writer.save()\n xlsx_data = output.getvalue()\n return xlsx_data", "def print_all(self):\n if not request:\n raise exceptions.Warning(_(''), _(''))\n session_id = request.session.sid\n config = self.env['ir.config_parameter']\n addons_url = config.get_param('addons_path')\n phantomjs_path = config.get_param('phantomjs_path')\n phantomjs_path = 'phantomjs' if not phantomjs_path else phantomjs_path\n print_url = self.env.context.get('protocol_url', False)\n if print_url:\n print_urls = [print_url]\n else:\n print_urls = self._get_print_urls()\n if not print_urls:\n return\n phantom = [\n phantomjs_path,\n addons_url +\n '/quality_protocol_report/static/src/js/phantom_url_to_pdf.js',\n session_id, \"/tmp\"] + print_urls\n process = subprocess.Popen(phantom)\n process.communicate()\n filenames = []\n for url in print_urls:\n fname = url.replace('/', '').replace(':', '')\n weight_pos = fname.find('?weight=')\n if weight_pos > -1:\n fname = fname[weight_pos+8:weight_pos+10] + '-' + fname[:weight_pos]\n filenames.append('/tmp/' + fname + '.pdf')\n filepath = self._merge_pdf(sorted(filenames))\n fildecode = open(filepath, 'r')\n encode_data = fildecode.read()\n fildecode.close()\n active_model = self.env.context.get('active_model', False)\n active_id = self.env.context.get('active_id', False)\n ungrouped_also = self.env.context.get('print_ungrouped_also', False)\n if active_model and active_id and not ungrouped_also:\n active_name = 
self.env[active_model].browse([active_id]).name\n else:\n dt = fields.Datetime.context_timestamp(self, datetime.now())\n active_name = dt.strftime('%d-%m-%Y_%Hh%M')\n filename = 'protocolo.pdf' if print_url else \\\n 'protocolos_' + str(active_name).lower() + '.pdf'\n attachment_data = {\n 'name': filename,\n 'datas_fname': filename,\n 'datas': base64.b64encode(encode_data),\n 'res_model': active_model,\n 'res_id': 0 if print_url else self.env.context.get('active_id', False),\n }\n self.env['ir.attachment'].search(\n [('name', '=', attachment_data['name']),\n ('res_id', '=', attachment_data['res_id']),\n ('res_model', '=', attachment_data['res_model'])]).unlink()\n attachment = self.env['ir.attachment'].create(attachment_data)\n\n filenames.append(filepath)\n for my_file in filenames:\n os.remove(my_file)\n\n if print_url:\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/web/binary/saveas?model=ir.attachment&field=datas' +\n '&filename_field=name&id=%s' % (attachment.id),\n 'target': 'self',\n }\n else:\n return {'type': 'ir.actions.act_window_close'}", "def gen_report(a_dict):\n\n # header = \"{:<20} |{:^10}|{:^10}|{:>10}\".format(\"Donor Name\",\"Total Given\",\"Num Gifts\",\"Average Gift\")\n\n header = \"{:<20} |{:^13}|{:^13}|{:>13}\".format(\"Donor Name\",\"Total Given\",\"Num Gifts\",\"Average Gift\")\n\n print(header)\n for k,v in a_dict.items():\n total, num, avg = gen_stats(v)\n row = \"{:<20} ${:^13} {:^13}${:>13}\".format(k,total,num,avg)\n print(row)", "def _write_report_content(self):\n self.worksheet.batch_update(self.report_data)\n gsfmt.format_cell_ranges(self.worksheet, self.report_formatting)", "def print_all_rotation_sheet_data(self,product_data_dict,workbook,worksheet_all_stock_rotation,body_style,qty_cell_style,value_style,blank_cell_style,value_style2):\n row=4\n column=0\n for product_id,product_data in product_data_dict.iteritems():\n worksheet_all_stock_rotation.row(row).height = 350\n opening_stock = product_data.get('opening_qty') or 0\n qty_purchase = product_data.get('qty_purchase_in_duration') or 0\n qty_sale = product_data.get('qty_sales_in_duration') or 0\n scap_qty = product_data.get('scrap_location_qty') or 0\n warehouse_in_qty = product_data.get('warehouse_in_qty') or 0\n warehouse_out_qty = product_data.get('warehouse_out_qty') or 0\n \n closing_qty = (opening_stock+qty_purchase+warehouse_in_qty)-(qty_sale+scap_qty+warehouse_out_qty)\n last_purchase_date = ''\n last_sales = ''\n last_purchase_date_lst = filter(None, product_data.get('last_purchase_date'))\n if any(last_purchase_date_lst):\n last_purchase_date = max(last_purchase_date_lst)\n last_purchase_date = last_purchase_date.strftime('%d-%m-%Y') or ''\n last_sales_lst = filter(None, product_data.get('last_sales'))\n if any(last_sales_lst):\n last_sales = max(last_sales_lst)\n last_sales = last_sales.strftime('%d-%m-%Y') or ''\n \n worksheet_all_stock_rotation.write(row,column,product_id.default_code,body_style)\n worksheet_all_stock_rotation.write(row,column+1,product_id.name or '',body_style)\n worksheet_all_stock_rotation.write(row,column+2,product_id.standard_price or 0,qty_cell_style)\n worksheet_all_stock_rotation.write(row,column+3,product_id.lst_price or 0,qty_cell_style)\n worksheet_all_stock_rotation.write(row,column+4,None,blank_cell_style)\n worksheet_all_stock_rotation.write(row,column+5,opening_stock,value_style2)\n worksheet_all_stock_rotation.write(row,column+6,qty_purchase,value_style2)\n worksheet_all_stock_rotation.write(row,column+7,qty_sale,value_style2)\n 
worksheet_all_stock_rotation.write(row,column+8,scap_qty,value_style2)\n worksheet_all_stock_rotation.write(row,column+9,product_data.get('adjusted_qty_in_duration') or 0,value_style2)\n worksheet_all_stock_rotation.write(row,column+10,closing_qty,value_style2)\n worksheet_all_stock_rotation.write(row,column+11,None,blank_cell_style)\n worksheet_all_stock_rotation.write(row,column+12,warehouse_in_qty,value_style)\n worksheet_all_stock_rotation.write(row,column+13,warehouse_out_qty,value_style)\n worksheet_all_stock_rotation.write(row,column+14,None,blank_cell_style)\n worksheet_all_stock_rotation.write(row,column+15,last_purchase_date,body_style)\n worksheet_all_stock_rotation.write(row,column+16,last_sales,body_style)\n row+=1\n return True", "def do(self):\r\n self.dlCsvReport()\r\n self.dlXlsReport()", "def report_printing(cls):\n while True:\n print('Donor Name' + ' ' * 16 + '| Total Given | Num Gifts | Average Gift')\n print('-' * 66)\n print(donor_db.create_report())\n print('Returning to main menu...\\n')\n return", "def export(self,**kwargs):\n \n # import pdb;pdb.set_trace()\n \n # provide for case where recs are set extenally\n if not self.recs:\n self.select_recs(**kwargs)\n if self.recs:\n if self.export_file_name:\n filename = self.export_file_name\n else:\n filename = \"{table_name}_report_{datetime}.csv\".format(\n table_name = self.table.display_name,\n datetime = date_to_string(local_datetime_now(),'iso_datetime'),\n ).replace(' ','_').lower()\n \n if not self.export_fields:\n # include all fields by default\n self.export_fields = self._set_default_list_fields(include_all=True).copy()\n\n self.set_list_fields(self.export_fields)\n \n \n if self.export_template:\n result = render_template(self.export_template, data=self)\n else:\n # add a descriptive title row\n if self.export_title:\n result = self.export_title.strip() + '\\n'\n else:\n result = \"Export of table {} as of {}\\n\".format(self.table.table_name,excel_date_and_time_string(local_datetime_now()))\n \n result += ','.join([x['label'] for x in self.export_fields]) + '\\n'\n for rec in self.recs:\n rec_row = []\n for field in self.export_fields:\n data = rec.__getattribute__(field['name'])\n if field['type'].upper() == \"DATE\":\n data = local_date_string(data)\n elif field['type'].upper() == \"DATETIME\":\n data = excel_date_and_time_string(data)\n else:\n # just text\n data = str(data).strip()\n \n # replace double quotes with double-double quotes\n data = data.replace('\"','\"\"') #double up on double quotes\n \n if \",\" in data:\n # if any commas, wrap in quotes\n data = '\"' + data + '\"'\n \n #replace returns\n data = data.replace('\\r\\n',' -crnl- ')\n data = data.replace('\\n',' -nl- ')\n data = data.replace('\\r',' -rtn- ')\n\n rec_row.append(data)\n \n result += ','.join([str(x) for x in rec_row]) + '\\n'\n \n return DataStreamer(result,filename,'text/csv').send()\n \n self.result_text = \"No records selected\"\n self.success = False\n \n flash(self.result_text)\n return self.list(**kwargs)", "def gen_report(self):\n self.report = '#Report for {0}\\n'.format(self.ip)\n self.report += 'This report was generated by the chameleon pentest bot. 
We cannot grant 100% accurate results.\\n'\n self.report += '###Services:\\n'\n for service in self.services:\n self.report += '#####{0}:\\n- Port: {1}\\n- Info:{2}'.format(service.name, service.port, service.info)\n self.report += '###Vulnerabilities:\\n'\n for vuln in self.vulns:\n self.report += '- {0}\\n'.format(vuln.name)\n self.report += 'Open an issue for wrong results at github.com/coretool/chameleon.'", "def create_excel_workbook(\n file_name: str,\n league: League,\n schedule: Schedule,\n list_teams: list[str],\n list_capts: list[str],\n):\n # write to excel\n wb = Workbook()\n\n # write to active worksheet\n ws = wb.active\n\n # Title\n ws[\"A1\"] = league.name\n # First half of schedule\n for rnd_num in range(int(schedule.nrounds / 2)):\n sch_round = schedule.rounds[rnd_num]\n row = [\n sch_round.play_date,\n ]\n for match in range(sch_round.nmatches):\n _match = sch_round.matches[match]\n _away = _match.away\n _home = _match.home\n row.append(\n f\"{str(list_teams.index(_away) + 1).zfill(2):2} @ {str(list_teams.index(_home) + 1).zfill(2):2}\"\n )\n ws.append(row)\n ws[\"B17\"] = league.divs[0].name\n ws[\"G17\"] = league.divs[1].name\n for _team_i in range(league.divs[0].nteams):\n row = [\"\"]\n row.append(_team_i + 1)\n row.append(f\"{list_teams[_team_i]} - {list_capts[_team_i]}\")\n row.extend([\"\", \"\", \"\"])\n row.append(_team_i + 11)\n row.append(f\"{list_teams[_team_i + 10]} - {list_capts[_team_i + 10]}\")\n ws.append(row)\n\n # Title\n ws[\"A29\"] = league.name\n # Second half of schedule\n for rnd_num in range(int(schedule.nrounds / 2), schedule.nrounds):\n sch_round = schedule.rounds[rnd_num]\n row = [\n sch_round.play_date,\n ]\n for match in range(sch_round.nmatches):\n _match = sch_round.matches[match]\n _away = _match.away\n _home = _match.home\n row.append(\n f\"{str(list_teams.index(_away) + 1).zfill(2):2} @ {str(list_teams.index(_home) + 1).zfill(2):2}\"\n )\n ws.append(row)\n ws[\"B45\"] = league.divs[0].name\n ws[\"G45\"] = league.divs[1].name\n for _team_i in range(league.divs[0].nteams):\n row = [\"\"]\n row.append(_team_i + 1)\n row.append(f\"{list_teams[_team_i]} - {list_capts[_team_i]}\")\n row.extend([\"\", \"\", \"\"])\n row.append(_team_i + 11)\n row.append(f\"{list_teams[_team_i + 10]} - {list_capts[_team_i + 10]}\")\n ws.append(row)\n\n wb.save(file_name)", "def _generate_invoice_report(self, request, queryset):\n logger.info('Generating invoice report for model {}'.format(\n queryset.model\n ))\n data = self._get_report_data(request, queryset)\n content = self._get_pdf_content(data)\n file_name = '{}-{}.pdf'.format(\n self._invoice_report_name, data['id'],\n )\n return generate_pdf_response(content, file_name)", "def write_table(final_df, outfile, stages):\n\n workbook = xlsxwriter.Workbook(\n str(outfile), {\"constant_memory\": False}\n )\n worksheet = workbook.add_worksheet(\"Table 1\")\n\n header_color = \"#F2DCDB\"\n white = \"#000000\"\n black = \"#FFFFFF\"\n loc_cell_width = 20\n data_cell_width = 15\n column_start = 65\n\n header_format = get_format_obj(\n workbook, bg_color=header_color, font_size=12, bold=True\n )\n title_format = get_format_obj(\n workbook, bg_color=white, font_size=13, align=False, bold=True\n )\n title_format.set_font_color(black)\n \n # Column length is basically all columns in the dataframe except 'level'\n col_len = final_df.shape[1]-1\n \n data_cols = final_df.drop([\"level\", \"lancet_label\"], axis=1).columns.values\n \n cols = list(map(chr, range(column_start, column_start+col_len)))\n 
worksheet.set_column(cols[0]+\":\"+cols[0], loc_cell_width)\n worksheet.set_column(cols[1]+\":\"+cols[-1], data_cell_width)\n\n # place-holder to manually adjust title as needed\n title = (\n \"Title goes here.\"\n )\n curr_row = 1\n end_row = curr_row + CELL_HT[\"title\"]\n row_range = cols[0] + str(curr_row) + \":\" + cols[-1] + str(end_row)\n worksheet.merge_range(row_range, title, title_format)\n\n curr_row = end_row+1\n page_row_count = 1\n page_breaks = []\n\n for _, row in final_df.iterrows():\n page_row_count += 1\n \n ### Insert page break after 20 rows.\n if row[\"level\"] == 0 or (page_row_count != 0 and\n page_row_count % 20 == 0):\n page_row_count = 0\n page_breaks.append(curr_row - 1)\n curr_row = write_header(\n worksheet, curr_row, cols, data_cols,\n header_format, stages\n )\n end_row = curr_row + CELL_HT[\"data_cols\"]\n col_idx = 0\n\n if row[\"level\"] < 3:\n loc_fmt_obj = get_format_obj(\n workbook, font_size=11,\n bg_color=header_color, bold=True,\n align=False\n )\n data_fmt_obj = get_format_obj(\n workbook, font_size=11,\n bg_color=header_color, bold=True\n )\n else:\n loc_fmt_obj = get_format_obj(\n workbook, font_size=11, align=False\n )\n data_fmt_obj = get_format_obj(\n workbook, font_size=11\n )\n\n for col in final_df:\n if col == \"level\":\n continue\n\n row_range = (\n cols[col_idx] + str(curr_row) + \":\" +\n cols[col_idx] + str(end_row)\n )\n if col == \"lancet_label\":\n loc_name = INDENT_MAP[row[\"level\"]] + row[col]\n worksheet.merge_range(row_range, loc_name, loc_fmt_obj)\n else:\n worksheet.merge_range(row_range, row[col], data_fmt_obj)\n\n col_idx += 1\n curr_row = end_row+1\n\n worksheet.set_h_pagebreaks(page_breaks[1:])\n worksheet.fit_to_pages(1, 0)\n workbook.close()", "def printReport( self, event):\n \n choiceListMonths = []\n choiceListDays = []\n choiceListWeeks = [\"Last Week\"]\n \n for root, dir, files in os.walk('Database\\\\Reports'):\n for file in files:\n if self.monthName(file[:2]) not in choiceListMonths:\n choiceListMonths.append(self.monthName(file[:2]))\n choiceListDays.append(file)\n \n lastWeek = self.getLastWeek()\n \n \n choiceListMaster = choiceListMonths+choiceListWeeks+choiceListDays\n \n \n choice = wx.MultiChoiceDialog(None, \n 'What days would you like to print?', \n 'Print Reports', choiceListMaster)\n choice.Centre()\n choice.ShowModal()\n result = choice.GetSelections()\n \n \n for selection in result:\n \n # Get all daily reports for month if True\n if choiceListMaster[selection] in choiceListMonths:\n for root, dir, files in os.walk('Database\\\\Reports'):\n for file in files:\n if self.monthName(file[:2]) == \\\n choiceListMaster[selection]:\n subprocess.call('\"C:\\\\Program Files (x86)\\\\'\n 'OpenOffice.org 3\\\\program\\\\'\n 'soffice.exe\" -p Database\\\\'\n 'Reports\\\\'+file)\n \n elif str(choiceListMaster[selection]) in choiceListWeeks:\n for root, dir, files in os.walk('Database\\\\Reports'):\n for file in files:\n if datetime.date(\n int(file[6:10].lstrip('0')),\n int(file[:2].lstrip('0')),\n int(file[3:5].lstrip('0'))) in lastWeek:\n \n subprocess.call('\"C:\\\\Program Files (x86)\\\\'\n 'OpenOffice.org 3\\\\program\\\\'\n 'soffice.exe\" -p Database\\\\'\n 'Reports\\\\'+file)\n \n else:\n subprocess.call('\"C:\\\\Program Files (x86)\\\\'\n 'OpenOffice.org 3\\\\program\\\\'\n 'soffice.exe\" -p Database\\\\'\n 'Reports\\\\'+\n choiceListMaster[selection])\n \n choice.Destroy()", "def 
create_all_inventory_sheet(self,from_date,to_date,workbook,sheet_data,row_data,body_style,qty_cell_style,value_style,blank_cell_style,header_bold):\n worksheet_all_stock_rotation = workbook.add_sheet('All Stock Rotation',cell_overwrite_ok=True)\n worksheet_all_stock_rotation.row(0).height = 400\n worksheet_all_stock_rotation.row(1).height = 400\n worksheet_all_stock_rotation.row(3).height = 400\n worksheet_all_stock_rotation.col(0).width = 8000\n worksheet_all_stock_rotation.col(1).width = 10000\n worksheet_all_stock_rotation.col(2).width = 3000\n worksheet_all_stock_rotation.col(3).width = 3000\n worksheet_all_stock_rotation.col(4).width = 1200\n worksheet_all_stock_rotation.col(5).width = 5000\n worksheet_all_stock_rotation.col(6).width = 5000\n worksheet_all_stock_rotation.col(7).width = 5000\n worksheet_all_stock_rotation.col(8).width = 7000\n worksheet_all_stock_rotation.col(9).width = 7000\n worksheet_all_stock_rotation.col(10).width = 5000\n worksheet_all_stock_rotation.col(11).width = 1200\n worksheet_all_stock_rotation.col(12).width = 7000\n worksheet_all_stock_rotation.col(13).width = 7000\n worksheet_all_stock_rotation.col(14).width = 1200\n worksheet_all_stock_rotation.col(15).width = 6000\n worksheet_all_stock_rotation.col(16).width = 6000\n worksheet_all_stock_rotation.write(0,0,'From Date',header_bold)\n worksheet_all_stock_rotation.write(0,1,from_date,body_style)\n worksheet_all_stock_rotation.write(1,0,'To Date',header_bold)\n worksheet_all_stock_rotation.write(1,1,to_date,body_style)\n worksheet_all_stock_rotation.write(3,0,'Internal Reference ',header_bold)\n worksheet_all_stock_rotation.write(3,1,'Name',header_bold)\n worksheet_all_stock_rotation.write(3,2,'Cost',header_bold)\n worksheet_all_stock_rotation.write(3,3,'Sales Price',header_bold)\n worksheet_all_stock_rotation.write(3,4,None,blank_cell_style)\n worksheet_all_stock_rotation.write(3,5,'Opening Stock',header_bold)\n worksheet_all_stock_rotation.write(3,6,'Purchase in period',header_bold)\n worksheet_all_stock_rotation.write(3,7,'Sales in Period',header_bold)\n worksheet_all_stock_rotation.write(3,8,'Discarded in Period(OUT)',header_bold)\n worksheet_all_stock_rotation.write(3,9,'Adjusted in Period(IN)',header_bold)\n worksheet_all_stock_rotation.write(3,10,'Closing Stock',header_bold)\n worksheet_all_stock_rotation.write(3,11,None,blank_cell_style)\n worksheet_all_stock_rotation.write(3,12,'Warehouse Transfer(IN)',header_bold)\n worksheet_all_stock_rotation.write(3,13,'Warehouse Transfer(OUT)',header_bold)\n worksheet_all_stock_rotation.write(3,14,None,blank_cell_style)\n worksheet_all_stock_rotation.write(3,15,'Last purchase',header_bold)\n worksheet_all_stock_rotation.write(3,16,'Last sale',header_bold)\n worksheet_all_stock_rotation.set_panes_frozen(True)\n worksheet_all_stock_rotation.set_horz_split_pos(4) \n worksheet_all_stock_rotation.set_vert_split_pos(2)\n return workbook,worksheet_all_stock_rotation", "def print_report(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n datas = {'ids': context.get('active_ids', [])}\n\n res = self.read(cr, uid, ids, context=context)\n res = res and res[0] or {}\n datas.update({'form': res})\n return self.pool['report'].get_action(cr, uid, ids, \n 'l10n_cl_hr_payroll.report_hrsalarybymonth', \n data=datas, context=context)", "def report(self, report_options=None):\n raise NotImplementedError()", "def GenerateReport(self, plugin):\n raise 'Method not implemented'", "def printReport(self): \n \n print('Distribution: ', self._distribution_type)\n 
print('Distribution Type: ', str(self._measure_type).replace('MeasureType.','')) \n print('Type Detection Match: ', str(self._measure_type_match))\n print('MLE: ', str(self._mle))\n print('Goodness of Fit: ', str(self._gof)) \n print('Goodness of Fit Pass: ', str(self._pass)) \n print('Overall Score: ', str(self._score)) \n print('-------------')", "def generate(self):\n\n # All columns should have the same number of rows.\n num_rows = len(self.columns[0].rows)\n for column in self.columns:\n assert len(column.rows) == num_rows\n\n # Write the header.\n def generateTitle(element):\n if isinstance(element, Column):\n return element.title.center(element.width)\n else:\n return \" \" * len(element)\n\n titles = self.gutter.join(\n [ generateTitle(e) for e in self.elements ])\n result = titles + \"\\n\" + len(titles) * \"-\" + \"\\n\"\n\n # Write the rows.\n def generateCell(element, r):\n if isinstance(element, Column):\n return element.generateRow(r)\n else:\n return element\n\n for r in xrange(num_rows):\n line = self.gutter.join(\n [ generateCell(e, r) for e in self.elements ])\n result += line + \"\\n\"\n\n # End the table.\n result += \"\\n\"\n\n return result", "def print_report(d):\n report_data = d.get_report_data()\n title = \"{:24} | {:12} | {:10} | {:20}\"\n dashes=67*('-');print(dashes)\n print(title.format('Donor Name','Total Given','Num Gifts','Average Gift'))\n strf_format = \"{:24} ${:12.2f} {:^10d} ${:12.2f}\"\n print(dashes)\n for donor in report_data:\n print(strf_format.format(*donor))\n print(dashes)", "def _create_xml_report(self, test, xml_obj):\n xml_report_path = os.path.join(test.work_dir,\n self.XML_REPORT_PATH)\n with open(xml_report_path, 'w') as xml_report:\n xml_report.write(etree.tostring(xml_obj, pretty_print=True))", "def write_report(self):\n\n # Write the title line. 
Set in INDEX marker so that this section will be\n # identified as a major category if this is included in a Book report.\n\n title = self._(\"all persons Report\")\n mark = IndexMark(title, INDEX_TYPE_TOC, 1) \n self.doc.start_paragraph(\"PLC-ReportTitle\")\n self.doc.write_text(title, mark)\n self.doc.end_paragraph()\n\n self.doc.start_table(\"PersonTable\", \"PLC-EventTable\")\n column_titles = [_(\"LNr\"), _(\"handle\"), _(\"Person\")] \n \n self.doc.start_row()\n for title in column_titles:\n self.doc.start_cell(\"PLC-Details\")\n self.doc.start_paragraph(\"PLC-Details\")\n self.doc.write_text(title)\n self.doc.end_paragraph()\n self.doc.end_cell()\n self.doc.end_row() \n\n self.__write_all_persons()", "def latex_report(result, *, decimal_places=3, prefix=\"\", generate_plots=True, figure_path=\"\", complete_document=True):\n if not isinstance(result, RankResult):\n raise TypeError(\"result must be of type RankResult and should be the outcome of calling the autorank function.\")\n\n if complete_document:\n print(r\"\\documentclass{article}\")\n print()\n print(r\"\\usepackage{graphicx}\")\n print(r\"\\usepackage{booktabs}\")\n print()\n print(r\"\\begin{document}\")\n print()\n\n print(r\"\\section{Results}\")\n print(r\"\\label{sec:%sresults}\" % prefix)\n print()\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n create_report(result, decimal_places=decimal_places)\n report = sys.stdout.getvalue()\n sys.stdout = old_stdout\n report = report.replace(\"_\", r\"\\_\")\n report = report.replace(\"+-\", r\"$\\pm$\")\n report = report.replace(\"(d=\", \"($d$=\")\n report = report.replace(\"(delta=\", r\"($\\delta$=\")\n report = report.replace(\"is alpha\", r\"$\\alpha$\")\n print(report.strip())\n print()\n\n if len(result.rankdf) > 2:\n latex_table(result, decimal_places=decimal_places, label='tbl:%sstat_results' % prefix)\n print()\n\n if result.omnibus != 'wilcoxon' and result.omnibus != 'bayes' and generate_plots and result.pvalue < result.alpha:\n # only include plots if the results are significant\n plot_stats(result)\n if len(figure_path) > 0 and not figure_path.endswith(\"/\"):\n figure_path += '/'\n figure_path = \"%s%sstat_results.pdf\" % (figure_path, prefix)\n plt.savefig(figure_path)\n\n print(r\"\\begin{figure}[h]\")\n print(r\"\\includegraphics[]{%s}\" % figure_path)\n if result.posthoc == 'nemenyi':\n print(r\"\\caption{CD diagram to visualize the results of the Nemenyi post-hoc test. The horizontal lines \"\n r\"indicate that differences are not significant.}\")\n elif result.posthoc == 'TukeyHSD' or result.posthoc == 'ttest':\n print(r\"\\caption{Confidence intervals and mean values of the populations.}\")\n else:\n # fallback in case of unknown post-hoc test. 
should not happen\n print(r\"\\caption{Plot of the results}\")\n print(r\"\\label{fig:%sstats_fig}\" % prefix)\n print(r\"\\end{figure}\")\n print()\n\n if complete_document:\n print(r\"\\end{document}\")", "def to_xlsx(self, filename):\n # create path if it does not exist\n suffix = filename.split(\".\")[-1]\n if not suffix == \"xlsx\":\n filename = filename + \".xlsx\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n writer = pd.ExcelWriter(filename)\n for name, df in sorted(self.input_data.items()):\n df.to_excel(writer, name)\n writer.save()\n logging.info(\"Scenario saved as excel file to %s\", filename)", "def format_report_header(self):", "def run_report_generation(**kwargs):\n out = run_python_script_helper(\n os.path.dirname(__file__), \"report_generation_example.py\", **kwargs\n )\n return out", "def _generate_report(self):\n\n _LOG.info(\"Generating the HTML report.\")\n\n # Make sure the output directory exists.\n try:\n self.outdir.mkdir(parents=True, exist_ok=True)\n except OSError as err:\n raise Error(f\"failed to create directory '{self.outdir}': {err}\")\n\n raw_stats_paths, descr_paths = self._copy_raw_data()\n\n # Find the styles and templates paths.\n templdir = FSHelpers.search_for_app_data(\"wult\", Path(\"templates\"),\n pathdescr=\"HTML report Jinja2 templates\")\n csspath = FSHelpers.search_for_app_data(\"wult\", Path(\"css/style.css\"),\n pathdescr=\"HTML report CSS file\")\n\n # Copy the styles file to the output directory.\n dstpath = self.outdir.joinpath(\"style.css\")\n try:\n shutil.copyfile(csspath, dstpath)\n except OSError as err:\n raise Error(f\"failed to copy CSS file from '{csspath}' to '{dstpath}':\\n{err}\")\n\n # The summary table is only included into the main HTML page.\n sum_tbl = self._prepare_summary_table(raw_stats_paths, descr_paths)\n links_tbl = self._prepare_links_table()\n\n # Each column name gets its own HTML page.\n for colname, pinfos in self._pinfos.items():\n stats_tbl = self._prepare_stats_table(pinfos)\n\n # Render the template.\n jenv = Jinja2.build_jenv(templdir, trim_blocks=True, lstrip_blocks=True)\n jenv.globals[\"stats_tbl\"] = stats_tbl\n jenv.globals[\"pinfos\"] = pinfos\n jenv.globals[\"colname\"] = colname\n jenv.globals[\"title_descr\"] = self.title_descr\n jenv.globals[\"toolname\"] = self._refinfo[\"toolname\"]\n\n if sum_tbl:\n jenv.globals[\"sum_tbl\"] = sum_tbl\n jenv.globals[\"links_tbl\"] = links_tbl\n templfile = outfile = \"index.html\"\n sum_tbl = None\n else:\n templfile = \"metric.html\"\n outfile = links_tbl[colname][\"fname\"]\n\n Jinja2.render_template(jenv, Path(templfile), outfile=self.outdir.joinpath(outfile))", "def main():\n path = userSpecify()\n newWorksheet = []\n w = 0\n path_list = path.split(os.sep)\n #print \"This is the path list: \", path_list\n dirName = path_list[-1]\n #print \"This is the Directory Name: \", dirName\n #List logs into an array\n print \"\\nFinding dstat logs ...\"\n ListOfLogs = []\n for r,d,f in os.walk(path, topdown=False):\n for files in f:\n if files.endswith(\".csv\") and \"dstat\" in files:\n ListOfLogs.append(os.path.join(r,files))\n for z in ListOfLogs:\n print \"Logs found: \", z\n\n workbook = xlsxwriter.Workbook(dirName + '_DSTAT_Test.xlsx')\n #print \"Created a workbook!\", workbook\n print \"Created a workbook!\"\n\n file_date_tuple_list = []\n for x in ListOfLogs:\n #Organize list of Logs in reverse order to display latest analysis first\n d = os.path.getmtime(x)\n file_date_tuple = (x,d)\n file_date_tuple_list.append(file_date_tuple)\n 
file_date_tuple_list.sort(key=lambda x: x[1], reverse=True)\n\n for each in file_date_tuple_list:\n ListOfCharts = []\n file = open_file(each[0])\n node = seperate_file(each[0])\n newWorksheet.append(workbook.add_worksheet(node))\n #print \"Created \", node, \" worksheet.\"\n #print \"This is the worksheet here: \", worksheet\n Cpu_bool, Mem_bool, Disk_bool, Net_bool, newWorksheet, lineCount = read_lines(file, newWorksheet, w)\n #print \"Cpu_bool is: \", Cpu_bool\n #print \"Mem_bool is: \", Mem_bool\n #print \"Disk_bool is: \", Disk_bool\n #print \"Net_bool is: \", Net_bool\n\n #Add Graphs\n rowGraph = 7\n columnGraph = 15\n if Cpu_bool == True:\n cpuUsrSysChart = graph_cpu_user_sys(workbook, node, lineCount)\n ListOfCharts.append(cpuUsrSysChart)\n cpuIdleChart = graph_cpu_idle(workbook, node, lineCount)\n ListOfCharts.append(cpuIdleChart)\n #print \"Here is the list of Charts: \", ListOfCharts\n else:\n print \"Failure: Data missing for Total CPU Usage!\"\n if Mem_bool == True:\n memFreeChart = graph_mem_free(workbook, node, lineCount)\n ListOfCharts.append(memFreeChart)\n #newWorksheet.insert_chart('P28', memFreeChart)\n else:\n print \"Failure: Data missing for Memory Usage!\"\n if Disk_bool == True:\n diskIOChart = graph_disk_io(workbook, node, lineCount)\n ListOfCharts.append(diskIOChart)\n #newWorksheet.insert_chart('Y28', diskIOChart)\n else:\n print \"Failure: Data missing from Disk Usage!\"\n if Net_bool == True:\n netTraffChart = graph_net_trans(workbook, node, lineCount)\n ListOfCharts.append(netTraffChart)\n #newWorksheet.insert_chart('P49', netTraffChart)\n else:\n print \"Failure: Data missing from Network Usage!\"\n for graph in ListOfCharts:\n newWorksheet[w].insert_chart(rowGraph,columnGraph, graph)\n rowGraph = rowGraph + 21\n w = w + 1\n workbook.close()", "def add_headings(self,from_date,to_date,warehouses,locations,workbook,header_bold,body_style,qty_cell_style,value_style,blank_cell_style):\n sheet_data={}\n row_data={}\n if warehouses:\n for warehouse in warehouses:\n warehouse.name_worksheet = workbook.add_sheet(warehouse.name,cell_overwrite_ok=True)\n warehouse.name_worksheet.row(0).height = 400\n warehouse.name_worksheet.row(1).height = 400\n warehouse.name_worksheet.row(3).height = 400\n warehouse.name_worksheet.col(0).width = 8000\n warehouse.name_worksheet.col(1).width = 10000\n warehouse.name_worksheet.col(2).width = 3000\n warehouse.name_worksheet.col(3).width = 3000\n warehouse.name_worksheet.col(4).width = 1200\n warehouse.name_worksheet.col(5).width = 5000\n warehouse.name_worksheet.col(6).width = 5000\n warehouse.name_worksheet.col(7).width = 5000\n warehouse.name_worksheet.col(8).width = 7000\n warehouse.name_worksheet.col(9).width = 7000\n warehouse.name_worksheet.col(10).width = 5000\n warehouse.name_worksheet.col(11).width = 1200\n warehouse.name_worksheet.col(12).width = 7000\n warehouse.name_worksheet.col(13).width = 7000\n warehouse.name_worksheet.col(14).width = 1200\n warehouse.name_worksheet.col(15).width = 6000\n warehouse.name_worksheet.col(16).width = 6000\n warehouse.name_worksheet.write(0,0,'From Date',header_bold)\n warehouse.name_worksheet.write(0,1,from_date,body_style)\n warehouse.name_worksheet.write(1,0,'To Date',header_bold)\n warehouse.name_worksheet.write(1,1,to_date,body_style)\n warehouse.name_worksheet.write(3,0,'Internal Reference ',header_bold)\n warehouse.name_worksheet.write(3,1,'Name',header_bold)\n warehouse.name_worksheet.write(3,2,'Cost',header_bold)\n warehouse.name_worksheet.write(3,3,'Sales Price',header_bold)\n 
warehouse.name_worksheet.write(3,4,None,blank_cell_style)\n warehouse.name_worksheet.write(3,5,'Opening Stock',header_bold)\n warehouse.name_worksheet.write(3,6,'Purchase in period',header_bold)\n warehouse.name_worksheet.write(3,7,'Sales in Period',header_bold)\n warehouse.name_worksheet.write(3,8,'Discarded in Period(OUT)',header_bold)\n warehouse.name_worksheet.write(3,9,'Adjusted in Period(IN)',header_bold)\n warehouse.name_worksheet.write(3,10,'Closing Stock',header_bold)\n warehouse.name_worksheet.write(3,11,None,blank_cell_style)\n warehouse.name_worksheet.write(3,12,'Warehouse Transfer(IN)',header_bold)\n warehouse.name_worksheet.write(3,13,'Warehouse Transfer(OUT)',header_bold)\n warehouse.name_worksheet.write(3,14,None,blank_cell_style)\n warehouse.name_worksheet.write(3,15,'Last purchase',header_bold)\n warehouse.name_worksheet.write(3,16,'Last sale',header_bold)\n warehouse.name_worksheet.set_panes_frozen(True)\n warehouse.name_worksheet.set_horz_split_pos(4) \n warehouse.name_worksheet.set_vert_split_pos(2)\n sheet_data.update({warehouse.id: warehouse.name_worksheet})\n row_data.update({warehouse.name_worksheet: 4})\n if locations:\n for location in locations:\n location.name_worksheet = workbook.add_sheet(location.name,cell_overwrite_ok=True)\n location.name_worksheet.row(0).height = 400\n location.name_worksheet.row(1).height = 400\n location.name_worksheet.row(3).height = 400\n location.name_worksheet.col(0).width = 8000\n location.name_worksheet.col(1).width = 10000\n location.name_worksheet.col(2).width = 3000\n location.name_worksheet.col(3).width = 3000\n location.name_worksheet.col(4).width = 1200\n location.name_worksheet.col(5).width = 5000\n location.name_worksheet.col(6).width = 5000\n location.name_worksheet.col(7).width = 5000\n location.name_worksheet.col(8).width = 7000\n location.name_worksheet.col(9).width = 7000\n location.name_worksheet.col(10).width = 5000\n location.name_worksheet.col(11).width = 1200\n location.name_worksheet.col(12).width = 7000\n location.name_worksheet.col(13).width = 7000\n location.name_worksheet.col(14).width = 1200\n location.name_worksheet.col(15).width = 6000\n location.name_worksheet.col(16).width = 6000\n location.name_worksheet.write(0,0,'From Date',header_bold)\n location.name_worksheet.write(1,0,'To Date',header_bold)\n location.name_worksheet.write(0,1,from_date,body_style)\n location.name_worksheet.write(1,1,to_date,body_style)\n location.name_worksheet.write(3,0,'Internal Reference ',header_bold)\n location.name_worksheet.write(3,1,'Name',header_bold)\n location.name_worksheet.write(3,2,'Cost',header_bold)\n location.name_worksheet.write(3,3,'Sales Price',header_bold)\n location.name_worksheet.write(3,4,None,blank_cell_style)\n location.name_worksheet.write(3,5,'Opening Stock',header_bold)\n location.name_worksheet.write(3,6,'Purchase in period',header_bold)\n location.name_worksheet.write(3,7,'Sales in Period',header_bold)\n location.name_worksheet.write(3,8,'Discarded in Period(OUT)',header_bold)\n location.name_worksheet.write(3,9,'Adjusted in Period(IN)',header_bold)\n location.name_worksheet.write(3,10,'Closing Stock',header_bold)\n location.name_worksheet.write(3,11,None,blank_cell_style)\n location.name_worksheet.write(3,12,'Warehouse Transfer(IN)',header_bold)\n location.name_worksheet.write(3,13,'Warehouse Transfer(OUT)',header_bold)\n location.name_worksheet.write(3,14,None,blank_cell_style)\n location.name_worksheet.write(3,15,'Last purchase',header_bold)\n location.name_worksheet.write(3,16,'Last 
sale',header_bold)\n location.name_worksheet.set_panes_frozen(True)\n location.name_worksheet.set_horz_split_pos(4) \n location.name_worksheet.set_vert_split_pos(2)\n sheet_data.update({location.id: location.name_worksheet})\n row_data.update({location.name_worksheet: 4})\n return workbook,sheet_data,row_data", "def main():\n\n gephyrin_df = gephyrin_pairwise()\n cav31_df = cav31_pairwise()\n synapsin_df = synapsin_pairwise()\n psd_df = psd95_pairwise()\n vglut1_df = vglut1_pairwise()\n\n\n sheet_name = 'Pairwise'\n fn = 'pairwise_comparisons.xlsx'\n df_list = [synapsin_df, vglut1_df, psd_df, gephyrin_df, cav31_df]\n aa.write_dfs_to_excel(df_list, sheet_name, fn)", "def report(self, filename=None):\r\n if self.settings.units == 'inch':\r\n toolfmt = ' T{:0>2d} {:%d.%df} {: >3d} {:f}in.\\n' % self.settings.format\r\n else:\r\n toolfmt = ' T{:0>2d} {:%d.%df} {: >3d} {:f}mm\\n' % self.settings.format\r\n rprt = '=====================\\nExcellon Drill Report\\n=====================\\n'\r\n if self.filename is not None:\r\n rprt += 'NC Drill File: %s\\n\\n' % self.filename\r\n rprt += 'Drill File Info:\\n----------------\\n'\r\n rprt += (' Data Mode %s\\n' % 'Absolute'\r\n if self.settings.notation == 'absolute' else 'Incremental')\r\n rprt += (' Units %s\\n' % 'Inches'\r\n if self.settings.units == 'inch' else 'Millimeters')\r\n rprt += '\\nTool List:\\n----------\\n\\n'\r\n rprt += ' Code Size Hits Path Length\\n'\r\n rprt += ' --------------------------------------\\n'\r\n for tool in iter(self.tools.values()):\r\n rprt += toolfmt.format(tool.number, tool.diameter, tool.hit_count, self.path_length(tool.number))\r\n if filename is not None:\r\n with open(filename, 'w') as f:\r\n f.write(rprt)\r\n return rprt", "def to_excel(self, filename):\n self.data.to_excel(filename)", "def create_report():\n\n # Sort by total donation amount\n _donors.sort(key=lambda x: -sum(x[1]))\n\n # Generate the report\n _str_ = [\"Donor Name | Total Given | Num Gifts | Average Gift\\n\" +\n \"------------------------------------------------------------------\"]\n for donor in _donors:\n sm = sum(donor[1])\n l = len(donor[1])\n _str_.append(f\"{donor[0]:<25} $ {sm:>9.2f} {l:>9d} $ {sm / l:>10.2f}\")\n\n report = '\\n'.join(_str_)\n print(report)\n return report", "def GenerateReport(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"generateReport\", payload=payload, response_object=None)" ]
[ "0.7330527", "0.6696778", "0.64272696", "0.6423999", "0.640377", "0.63337004", "0.625461", "0.621144", "0.61932975", "0.61916244", "0.6103704", "0.59963775", "0.5941088", "0.5938474", "0.5917404", "0.58896947", "0.58573407", "0.5852752", "0.5852723", "0.58489084", "0.5818077", "0.5803708", "0.57907164", "0.5779262", "0.57602906", "0.57589215", "0.57451236", "0.5731941", "0.57286435", "0.5727536", "0.5708689", "0.57073605", "0.570677", "0.5685545", "0.5681156", "0.56699526", "0.5655836", "0.56475735", "0.56460685", "0.5636936", "0.5630915", "0.5629761", "0.56290513", "0.56239605", "0.5591636", "0.5588945", "0.55823475", "0.55806637", "0.5580076", "0.5568518", "0.55400664", "0.5539035", "0.5536265", "0.5526016", "0.55210984", "0.5517906", "0.5513909", "0.55075634", "0.550735", "0.55068296", "0.55022705", "0.5498976", "0.5478224", "0.54712594", "0.5470698", "0.544947", "0.54439455", "0.5432606", "0.5413685", "0.540134", "0.53995115", "0.53980494", "0.5394678", "0.5386304", "0.53819925", "0.53701085", "0.5368431", "0.5360767", "0.5359108", "0.53543496", "0.53518677", "0.5332082", "0.53303266", "0.53282505", "0.53276896", "0.53227586", "0.53147423", "0.53134966", "0.53121495", "0.53027606", "0.52785397", "0.5265833", "0.5265276", "0.5263781", "0.5262515", "0.5262219", "0.52592254", "0.5252805", "0.5250166", "0.5241975" ]
0.69838953
1
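
The stored document_score and document_rank line up with the negative_scores list: for the row above, exactly one negative (0.7330527) scores higher than the positive document's 0.69838953, and the stored rank is 1. Below is a minimal sketch of that reading, assuming document_rank counts the negatives that outscore the document; the field is not formally defined anywhere in this dump, so the helper and its interpretation are assumptions.

def infer_rank(document_score, negative_scores):
    # Assumed reading: how many hard negatives were scored above the positive document.
    return sum(score > document_score for score in negative_scores)

# For the row above: only 0.7330527 exceeds 0.69838953, so infer_rank(...) == 1,
# which matches the stored document_rank of 1.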
Sample pure Lambda function
def lambda_handler(event, context):
    # try:
    #     ip = requests.get("http://checkip.amazonaws.com/")
    # except requests.RequestException as e:
    #     # Send some context about this error to Lambda Logs
    #     print(e)
    #     raise e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_lambda(n):\n for i in range(n):\n yield lambda : i", "def test_lambda(n):\n return [lambda v=i: v for i in range(n)]", "def lambda_method(self,t): \n return 5*math.sin(2*math.pi*1*t) # I don't see the value of 1 here but this is how lamda is defined in the exercise.", "def n_lambda(self):\n return self.b()", "def one():\n return lambda f: lambda x: f(x)", "def lambda_fn(ds, fn):\n logger.info(\"Applying function '%s' on dataset.\", str(fn))\n return fn(ds)", "def testGetLambda(self):\n self.ports.get_lambda(file_name = 'get_lambda.xml', port_ids = portsDict['port_ids'], lambdas = portsDict['lambda'])", "def test_lambda_support_no_parameters_complex_expression(self):\n code = \"\"\"\n () -> {\n if (true) return 21;\n else\n {\n int result = 21;\n return result / 2;\n }\n };\"\"\"\n self.assert_contains_lambda_expression_in_m(\n parse.parse(setup_java_class(code)))", "def lambda_eval(v):\n return v() if hasattr(v, '__call__') else v", "def test_lambda_support_no_parameters_expression_body(self):\n test_classes = [\n setup_java_class(\"() -> 3;\"),\n setup_java_class(\"() -> null;\"),\n setup_java_class(\"() -> { return 21; };\"),\n setup_java_class(\"() -> { System.exit(1); };\"),\n ]\n for test_class in test_classes:\n clazz = parse.parse(test_class)\n self.assert_contains_lambda_expression_in_m(clazz)", "def task_4_return_lambda_sum_2_ints() -> DT:\n return lambda x, y: x + y", "def _gen_closure(fnc,arg):\n return lambda s: fnc(arg)", "def test_lambda_support_no_parameters_no_body(self):\n self.assert_contains_lambda_expression_in_m(\n parse.parse(setup_java_class(\"() -> {};\")))", "def dummy_fn(self, *args, **kwargs):", "def test_generate_lambda_alchemical_function(self):\n from openmmtools.utils import math_eval\n\n def evaluate(expr, l):\n variables = {'lambda': l}\n return math_eval(expr, variables)\n\n # Each test case are [(lambda0, lambda1), (f(lambda0), f([lambda0+lambda1]/2), f(lambda1))]\n # where the second tuple in the list are the expected values of the function.\n test_cases = [(0, 1), (1, 0), (2, 3), (3, 2), (4, 8), (10, 5)]\n\n for lambda0, lambda1 in test_cases:\n expr = ExperimentBuilder._generate_lambda_alchemical_function(lambda0, lambda1)\n print(lambda0, lambda1, ':', expr)\n assert evaluate(expr, lambda0) == 0.0\n assert evaluate(expr, (lambda0 + lambda1)/2) == 0.5\n assert evaluate(expr, lambda1) == 1.0\n\n # The funciton must be constant after the end states.\n if lambda0 < lambda1:\n assert evaluate(expr, lambda0-1) == 0.0\n assert evaluate(expr, lambda1+1) == 1.0\n else:\n assert evaluate(expr, lambda0+1) == 0.0\n assert evaluate(expr, lambda1-1) == 1.0", "def test_lambda_wrapper_basic_events(reporter_mock, context):\n\n @lumigo_tracer(token=\"123\")\n def lambda_test_function(event, context):\n pass\n\n lambda_test_function({}, context)\n function_span = SpansContainer.get_span().function_span\n assert not SpansContainer.get_span().spans\n assert \"started\" in function_span\n assert \"ended\" in function_span\n assert reporter_mock.call_count == 2\n first_send = reporter_mock.call_args_list[0][1][\"msgs\"]\n assert len(first_send) == 1\n assert first_send[0][\"id\"].endswith(\"_started\")\n assert first_send[0][\"maxFinishTime\"]", "def visit_Lambda(self, node: ast.Lambda) -> None:\n self._check_useless_lambda(node)\n self._check_implicit_primitive(node)\n self.generic_visit(node)", "def visit_Lambda(self, node: ast.Lambda) -> None:\n self._counter.check_arguments_count(node)\n self.generic_visit(node)", "def two():\n return lambda f: lambda x: 
f(f(x))", "def getTest(self):\n if self.applyTo == 'global':\n return lambda x, on: on\n fun = evaluate(self.check)[0]\n if self.on:\n return lambda x, on: fun(*x) and on\n else:\n return lambda x, on: fun(*x)", "def islambda(func):\n return getattr(func, 'func_name', False) == '<lambda>'", "def fn():", "def test_simple_closure(a, b):\n def f():\n return a + 1\n\n def g():\n return b + 2\n return f() * g()", "def Poisson(name, lamda):\n return rv(name, PoissonDistribution, lamda)", "def get_lambda(thalf):\n\n return math.log(2.)/thalf", "def sample_function(self, a, b):\r\n return a + b", "def make_anonymous_factorial():\n return lambda val : (lambda f, v : f(f, v)) (lambda f, v : 1 if v == 0 else mul(v, f(f, sub(v, 1))), val)", "def starfilter(\n predicate: Callable[..., bool]\n) -> Callable[[AsyncObservable[Any]], AsyncObservable[Any]]:\n\n def handler(\n next: Callable[[Iterable[Any]], Awaitable[None]], args: Iterable[Any]\n ) -> Awaitable[None]:\n if predicate(*args):\n return next(args)\n return aiotools.empty()\n\n return transform(handler)", "def test_simple_closure(a, b):\n def f():\n return a + 1.0\n\n def g():\n return b + 2.0\n return f() * g()", "def lambda_func_doc(self, label):\n latex = (\n r'0 = \\lambda - '\n r'\\frac{\\dot{m}_\\mathrm{air}}{\\dot{m}_\\mathrm{air,min}}'\n )\n return generate_latex_eq(self, latex, label)", "def sample(self, *args, **kwargs):", "def fn(*args, **kwargs):\n pass", "def V2lambda(V):\n return(3956/V)", "def GenerateSpecialFunction(n):\n return eval('lambda a: %s' % GenerateSpecialExpression(n))", "def sample(self):\n return self._sample_func", "def _funcOrLambda(self, node, gen, ndecorators):\n gen.Start()\n gen.FindLocals()\n gen.Dispatch(node.code)\n gen.Finish()\n\n self.set_lineno(node)\n for default in node.defaults:\n self.visit(default)\n self._makeClosure(gen, len(node.defaults))\n for i in xrange(ndecorators):\n self.emit('CALL_FUNCTION', 1)", "def lambda_function(f):\n @functools.wraps(f)\n def wrapper(event, context):\n global _CURRENT_LAMBDA_CONTEXT\n _CURRENT_LAMBDA_CONTEXT = context\n try:\n result = f(event, context)\n return wait(lambda: result)\n except:\n cls, exc, trace = sys.exc_info()\n report_exc_info((cls, exc, trace.tb_next))\n wait()\n raise\n return wrapper", "def example_function():", "def example_function(any):\n return any + 5", "def test_op_lambda(self) -> None:\n op_base = OpLambda(func=lambda x: x + 3)\n kwargs_per_step_to_add = [dict(), dict(), dict()]\n op_repeat = OpRepeat(op_base, kwargs_per_step_to_add)\n sample_dict = NDict({})\n sample_dict[\"data.val.a\"] = 5\n sample_dict = op_repeat(sample_dict, \"_.test_repeat\", key=\"data.val.a\")\n self.assertEqual(sample_dict[\"data.val.a\"], 14)\n\n op_base = OpLambda(func=lambda x: x + 3, func_reverse=lambda x: x - 3)\n op_repeat = OpRepeat(op_base, kwargs_per_step_to_add)\n sample_dict = NDict({})\n sample_dict[\"data.val.a\"] = 5\n sample_dict = op_repeat(sample_dict, \"_.test_repeat\", key=\"data.val.a\")\n self.assertEqual(sample_dict[\"data.val.a\"], 14)\n\n op_repeat.reverse(\n sample_dict,\n key_to_follow=\"data.val.a\",\n key_to_reverse=\"data.val.a\",\n op_id=\"_.test_repeat\",\n )\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n\n sample_dict[\"data.val.b\"] = 51\n op_repeat.reverse(\n sample_dict,\n key_to_follow=\"data.val.a\",\n key_to_reverse=\"data.val.b\",\n op_id=\"_.test_repeat\",\n )\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 42)", "def dummy_func(*args, **kwargs):\r\n pass", "def 
sampleFunction(x: int, y: float) -> float:\n return x * y", "def dummy_fn(self):\n\t\tpass", "def r_lambda_func(depth):\n first_order = [lambda x, y: x, lambda x, y: y]\n elementary_func = ['prod', 'avg', 'cos_pi', 'sin_pi']\n if depth == 0:\n rand = random.randint(0, 1)\n if rand == 0:\n return lambda x, y: x\n else:\n return lambda x, y: y\n else:\n rand = random.randint(0, len(elementary_func))\n rand = 1\n if rand == 0:\n return lambda x, y: (r_lambda_func(depth-1)(x, y)) * (r_lambda_func(depth-1)(x, y))\n elif rand == 1:\n return lambda x, y: .5 * ((r_lambda_func(depth-1)(x, y) + r_lambda_func(depth-1)(x, y)))(x, y)\n elif rand == 2:\n return lambda x, y: math.cos(lambda x, y: math.pi(x, y) * r_lambda_func(depth-1)(x, y))(x, y)\n elif rand == 3:\n return lambda x, y: math.sin(lambda x, y: math.pi(x, y) * r_lambda_func(depth-1)(x, y))(x, y)", "def test_op_lambda_with_kwargs(self) -> None:\n op_base = OpLambda(func=lambda x, y: x + y)\n kwargs_per_step_to_add = [dict(), dict(), dict()]\n op_repeat = OpRepeat(op_base, kwargs_per_step_to_add)\n sample_dict = NDict()\n sample_dict[\"data.val.a\"] = 5\n sample_dict = op_repeat(sample_dict, \"_.test_repeat\", key=\"data.val.a\", y=5)\n self.assertEqual(sample_dict[\"data.val.a\"], 20)", "def lambdafan(func):\n if 'AWS_LAMBDA_FUNCTION_NAME' not in os.environ:\n return func\n\n @functools.wraps(func)\n def scaleout(*args, **kw):\n client = boto3.client('lambda')\n client.invoke(\n FunctionName=os.environ['AWS_LAMBDA_FUNCTION_NAME'],\n InvocationType='Event',\n Payload=dumps({\n 'event': 'fanout',\n 'function': func.__name__,\n 'args': args,\n 'kwargs': kw}),\n Qualifier=os.environ['AWS_LAMBDA_FUNCTION_VERSION'])\n return scaleout", "def wrap_gate(fn):\n return lambda parms: fn(**parms) if len(parms) > 0 else fn", "def test_function(arg):\n return arg * 2", "def getLambda(inp):\n\treturn 0.9/(getK1(inp) - getCMean(inp))", "def _fs(fn):\n return lambda fs, *args, **kwargs: fn(*args, **kwargs)", "def _generateLambda(self, string):\n derivation = self.fieldNames.sub(r'parent.getSampleValue(stats, \"\\1\")',\n string)\n return lambda stats, parent: eval(derivation)", "def f():", "def f():", "def __call__(fun_name):", "def get_lambda_value(lambda_node):\n return get_call_value(lambda_node.body)", "def _lambda_wrapper(*args, **kwargs):\n cold_start_duration = time.time() - constants.COLD_START_TIME\n trace = epsagon.trace.trace_factory.get_or_create_trace()\n trace.prepare()\n\n try:\n event, context = args\n except ValueError:\n # This can happen when someone manually calls handler without\n # parameters / sends kwargs. 
In such case we ignore this trace.\n return func(*args, **kwargs)\n\n if isinstance(event, dict):\n ignored_payloads = _get_ignored_payloads()\n if ignored_payloads and event in ignored_payloads:\n return func(*args, **kwargs)\n\n if os.environ.get(\n 'AWS_LAMBDA_INITIALIZATION_TYPE'\n ) == 'provisioned-concurrency':\n constants.COLD_START = False\n\n try:\n runner = epsagon.runners.aws_lambda.LambdaRunner(\n time.time(),\n context\n )\n trace.set_runner(runner)\n # pylint: disable=W0703\n except Exception as exception:\n # Regress to python runner.\n warnings.warn(\n 'Lambda context is invalid, using simple python wrapper',\n EpsagonWarning\n )\n trace.add_exception(\n exception,\n traceback.format_exc()\n )\n return epsagon.wrappers.python_function.wrap_python_function(\n func,\n args,\n kwargs\n )\n\n if constants.COLD_START:\n runner.resource['metadata'][\n 'aws.lambda.cold_start_duration'\n ] = cold_start_duration\n constants.COLD_START = False\n\n try:\n trace.add_event(\n epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory(\n time.time(),\n event,\n context\n )\n )\n # pylint: disable=W0703\n except Exception as exception:\n trace.add_exception(\n exception,\n traceback.format_exc(),\n additional_data={'event': event}\n )\n\n if not trace.disable_timeout_send:\n trace.set_timeout_handler(context)\n\n result = None\n try:\n result = func(*args, **kwargs)\n if trace.propagate_lambda_id and isinstance(result, dict):\n result[EPSAGON_EVENT_ID_KEY] = runner.event_id\n runner.resource['metadata']['propagation_enabled'] = True\n return result\n # pylint: disable=W0703\n except Exception as exception:\n runner.set_exception(\n exception,\n traceback.format_exc(),\n handled=False\n )\n raise\n finally:\n try:\n _add_status_code(runner, result)\n if not trace.metadata_only:\n runner.resource['metadata']['return_value'] = result\n # pylint: disable=W0703\n except Exception as exception:\n trace.add_exception(\n exception,\n traceback.format_exc(),\n )\n try:\n if not trace.disable_timeout_send:\n epsagon.trace.Trace.reset_timeout_handler()\n # pylint: disable=W0703\n except Exception:\n pass\n try:\n epsagon.trace.trace_factory.send_traces()\n # pylint: disable=W0703\n except Exception:\n epsagon.utils.print_debug('Failed to send Lambda trace')", "def sample(self, x):", "def wrapper(*args, **kwargs):\r\n return lambda: func(*args, **kwargs)", "def func_star(a_b):\n return insideroutine(*a_b)", "def __call__(self, example):\n return self.test(example)", "def test_py_closure(self):", "def lambda_(s, x_r):\n return LAMBDA / linalg.norm(np.array(s) - np.array(x_r))", "def dummy_wrapper(func):\n return func", "def project_lambda(self, lambda_vec):\n return lambda_vec", "def test_Lambda_g(self):\n self.setUp()\n tmp = np.arange(1, 49).reshape(3, 2, 4, 2)\n g1 = np.broadcast_to(tmp[..., None], tmp.shape + (2,)).swapaxes(1, -1)\n f = .02 * np.arange(1, 25).reshape(3, 2, 4)\n Lambda_g1 = self.E_func.Lambda_g(g1, f)\n Lambda_g1_ = np.array([[0.25, 0.65], [0.57, 1.61]])\n np.testing.assert_almost_equal(Lambda_g1[0, :, 0], Lambda_g1_)", "def ExpU(x):\n\treturn sum(x * lambdas)", "def closure(self, t):\n raise NotImplementedError", "def func():", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n return", "def _lambda_wrapper(*args, **kwargs):\n cold_start_duration = time.time() - constants.COLD_START_TIME\n trace = epsagon.trace.trace_factory.get_or_create_trace()\n trace.prepare()\n\n try:\n event, context = args\n except ValueError:\n # This can happen when 
someone manually calls handler without\n # parameters / sends kwargs. In such case we ignore this trace.\n return func(*args, **kwargs)\n\n try:\n runner = epsagon.runners.aws_lambda.StepLambdaRunner(\n time.time(),\n context\n )\n trace.set_runner(runner)\n # pylint: disable=W0703\n except Exception as exception:\n # Regress to python runner.\n warnings.warn(\n 'Lambda context is invalid, using simple python wrapper',\n EpsagonWarning\n )\n trace.add_exception(\n exception,\n traceback.format_exc()\n )\n return epsagon.wrappers.python_function.wrap_python_function(\n func,\n args,\n kwargs\n )\n\n if constants.COLD_START:\n runner.resource['metadata'][\n 'aws.lambda.cold_start_duration'\n ] = cold_start_duration\n constants.COLD_START = False\n\n try:\n trace.add_event(\n epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory(\n time.time(),\n event,\n context\n )\n )\n # pylint: disable=W0703\n except Exception as exception:\n trace.add_exception(\n exception,\n traceback.format_exc(),\n additional_data={'event': event}\n )\n\n trace.set_timeout_handler(context)\n\n result = None\n try:\n result = func(*args, **kwargs)\n steps_data = epsagon.utils.find_in_object(\n event,\n STEP_DICT_NAME\n )\n\n if isinstance(result, dict):\n epsagon.utils.print_debug(\n 'Step function result type is dict, steps_data={}'.format(\n steps_data\n )\n )\n # If the step functions data is not present, then this is the\n # First step.\n if steps_data is None:\n epsagon.utils.print_debug(\n 'Could not find existing steps data'\n )\n steps_dict = {'id': str(uuid4()), 'step_num': 0}\n path = []\n # Otherwise, just advance the steps number by one.\n else:\n # don't change trigger data\n steps_dict, path = steps_data\n steps_dict = copy.deepcopy(steps_dict)\n if 'step_num' in steps_dict:\n steps_dict['step_num'] += 1\n epsagon.utils.print_debug(\n 'Steps data found, new dict={}'.format(steps_dict)\n )\n else:\n steps_dict = {'id': str(uuid4()), 'step_num': 0}\n epsagon.utils.print_debug(\n 'Steps data not found, new dict={}'.format(\n steps_dict\n )\n )\n\n result_path = result\n # Tries to inject the steps data in the configured\n # or same path where it was found\n if isinstance(trace.step_dict_output_path, list):\n path = trace.step_dict_output_path\n try:\n for sub_path in path:\n result_path = result_path.get(sub_path)\n except Exception as exception: # pylint: disable=broad-except\n epsagon.utils.print_debug(\n 'Could not put steps in path={}'.format(path)\n )\n if result_path:\n epsagon.utils.print_debug(\n 'Adding steps dict to result_path={}'.format(\n result_path\n )\n )\n result_path[STEP_DICT_NAME] = steps_dict\n else:\n epsagon.utils.print_debug(\n 'Adding steps dict to root result'\n )\n result[STEP_DICT_NAME] = steps_dict\n\n runner.add_step_data(steps_dict)\n return result\n # pylint: disable=W0703\n except Exception as exception:\n runner.set_exception(\n exception,\n traceback.format_exc(),\n handled=False\n )\n raise\n finally:\n try:\n _add_status_code(runner, result)\n if not trace.metadata_only:\n runner.resource['metadata']['return_value'] = (\n copy.deepcopy(result)\n )\n # pylint: disable=W0703\n except Exception as exception:\n trace.add_exception(\n exception,\n traceback.format_exc(),\n )\n try:\n epsagon.trace.Trace.reset_timeout_handler()\n # pylint: disable=W0703\n except Exception:\n pass\n try:\n epsagon.trace.trace_factory.send_traces()\n # pylint: disable=W0703\n except Exception:\n pass", "def c_test_mutate_function(self, function):\r\n return 1", "def map(self, function):\n 
pass", "def dummy(*args, **kwargs):\r\n pass", "def lambda_handler(event, context):\n return dispatch(event)", "def evaluate_random_function(f, x, y, lambdas):\n if lambdas:\n return f(x, y)\n\n if len(f) == 3:\n x2 = evaluate_random_function(f[1], x, y, lambdas)\n y2 = evaluate_random_function(f[2], x, y, lambdas)\n\n for i in range(len(funcs)):\n if f[0] == funcs[i][0]:\n return funcs[i][1](x2, y2)\n else:\n for i in range(len(funcs)):\n if f[0] == funcs[i][0]:\n return funcs[i][1](x, y)", "def test_do_non_gf():\n f = lambda: None\n with raises(TypeError) as err_info:\n perf(do(f)())\n assert str(\n err_info.value\n ) == \"%r is not a generator function. It returned None.\" % (f,)", "def test_invoke_anonymous_pipe():\n\n def processor_a(app, items):\n items = list(items)\n assert items == [holocron.Item({\"a\": \"b\"})]\n items[0][\"x\"] = 42\n yield from items\n\n def processor_b(app, items):\n items = list(items)\n assert items == [holocron.Item({\"a\": \"b\", \"x\": 42})]\n items.append(holocron.Item({\"z\": 13}))\n yield from items\n\n def processor_c(app, items):\n items = list(items)\n assert items == [\n holocron.Item({\"a\": \"b\", \"x\": 42}),\n holocron.Item({\"z\": 13}),\n ]\n yield from items\n\n testapp = holocron.Application()\n testapp.add_processor(\"processor_a\", processor_a)\n testapp.add_processor(\"processor_b\", processor_b)\n testapp.add_processor(\"processor_c\", processor_c)\n\n stream = testapp.invoke(\n [\n {\"name\": \"processor_a\"},\n {\"name\": \"processor_b\"},\n {\"name\": \"processor_c\"},\n ],\n [holocron.Item({\"a\": \"b\"})],\n )\n\n assert next(stream) == holocron.Item({\"a\": \"b\", \"x\": 42})\n assert next(stream) == holocron.Item({\"z\": 13})\n\n with pytest.raises(StopIteration):\n next(stream)", "def test_of_with_args(self) -> None:\n assert Result.of(lambda x: bool(x > 0), 1).unwrap() is True", "def get_lambda(model):\n best_lambdas = [1000.0, 0.001, 100.0, 0.001, 100.0, 100.0, 0.001, 100.0]\n lambda_ = best_lambdas[model]\n return lambda_", "def test_ordered_cmp_function_lambda_too_many_arguments(cls):\n sl = orderedstructs.SkipList(object, lambda x, y, z: x + y + z < 0)\n sl.insert(4.0)\n with pytest.raises(TypeError) as err:\n sl.insert(8.0)\n if sys.version_info.major == 2:\n exp = '<lambda>() takes exactly 3 arguments (2 given)'\n elif sys.version_info.major == 3:\n if sys.version_info.minor >= 10:\n exp = \"test_ordered_cmp_function_lambda_too_many_arguments.<locals>.<lambda>() missing 1 required positional argument: 'z'\"\n else:\n exp = \"<lambda>() missing 1 required positional argument: 'z'\"\n else:\n assert 0, 'Unsupported Python version.'\n assert err.value.args[0] == exp", "def as_args(function):\n return lambda x: function(*x)", "def constfn(val):\n\n def func(_):\n return val\n\n return func", "def very_simple():\n yield 1", "def default(access):\n return lambda *args, **kwargs: access", "def mlift(func):\n return compose(unit, func)", "def carmichaellambda(n):\r\n\treturn carmichael_lambda(n)", "def transformed_q_lambda(\n q_tm1: Array,\n a_tm1: Array,\n r_t: Array,\n discount_t: Array,\n q_t: Array,\n lambda_: Array,\n stop_target_gradients: bool = True,\n tx_pair: TxPair = IDENTITY_PAIR,\n) -> Array:\n chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t, lambda_],\n [2, 1, 1, 1, 2, {0, 1}])\n chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t, lambda_],\n [float, int, float, float, float, float])\n\n qa_tm1 = base.batched_index(q_tm1, a_tm1)\n v_t = jnp.max(q_t, axis=-1)\n target_tm1 = transformed_lambda_returns(\n 
tx_pair, r_t, discount_t, v_t, lambda_, stop_target_gradients)\n return target_tm1 - qa_tm1", "def test_no_requirements(self):\n def f():\n pass\n self._run_as_operator(f)", "def is_lambda(fun):\n return isinstance(fun, type(LAMBDA)) and fun.__name__ == LAMBDA.__name__", "def my_fuction():\n pass", "def test_shortcut_2nested_callback():\n\n s = ast_lambda(\n \"ds.Select(lambda e: e.TrackStuffs()).Select(lambda ts: ts.Where(lambda t: t.pt() > 10))\"\n )\n objs = ObjectStream[Iterable[Event]](ast.Name(id=\"ds\", ctx=ast.Load()))\n\n new_objs, new_s, expr_type = remap_by_types(objs, \"ds\", Iterable[Event], s)\n\n assert ast.dump(new_s) == ast.dump(\n ast_lambda(\n \"ds.Select(lambda e: e.TrackStuffs())\"\n \".Select(lambda ts: ts.Where(lambda t: t.pt() > 10))\"\n )\n )\n assert ast.dump(new_objs.query_ast) == ast.dump(\n ast_lambda(\"MetaData(ds, {'t': 'track stuff'})\")\n )\n assert expr_type == Iterable[Iterable[TrackStuff]]", "def labwrap(key, m_fun, inner_fun):\n return lambda: m_fun(key, inner_fun)", "def GenerateNormalFunction(n):\n return eval('lambda a: %s' % GenerateNormalExpression(n))", "def helper(num):\r\n \r\n return lambda x: num * product(x)", "def leafEvaluation(self, state):\n\n \"\"\"\n Use random generated values for now\n \"\"\"\n z = np.random.randint(2)\n v = random.uniform(0, 1) \n return (1-LAMBDA) * v + LAMBDA * z", "def test_ordered_cmp_function_lambda_too_few_arguments(cls):\n sl = orderedstructs.SkipList(object, lambda x: x < 0)\n sl.insert(4.0)\n with pytest.raises(TypeError) as err:\n sl.insert(8.0)\n if sys.version_info.major == 2:\n exp = '<lambda>() takes exactly 1 argument (2 given)'\n elif sys.version_info.major == 3:\n if sys.version_info.minor >= 10:\n exp = 'test_ordered_cmp_function_lambda_too_few_arguments.<locals>.<lambda>() takes 1 positional argument but 2 were given'\n else:\n exp = '<lambda>() takes 1 positional argument but 2 were given'\n else:\n assert 0, 'Unsupported Python version.'\n assert err.value.args[0] == exp", "def squared_call(fn, arg):\n return fn(fn(arg))", "def __init__(self, lambda_=10):\n self.lambda_ = lambda_\n super().__init__()", "def carmichaellambda(n):\n\treturn carmichael_lambda(n)", "def test_func_2(self):\n def func():\n return 0\n self.assertEqual(type(decorators.timeit_2(func)), types.FunctionType)" ]
[ "0.71773744", "0.69108605", "0.6901355", "0.65597636", "0.6500524", "0.62655693", "0.62334967", "0.6138908", "0.6111436", "0.6015638", "0.5991434", "0.5974308", "0.5957245", "0.5953069", "0.59432507", "0.59294564", "0.5925585", "0.59229636", "0.5896368", "0.58732843", "0.58524406", "0.58370405", "0.57956797", "0.5773281", "0.57245713", "0.5722594", "0.57100433", "0.57077277", "0.5687446", "0.5683592", "0.56815696", "0.56756604", "0.5627688", "0.5619444", "0.5611675", "0.5611534", "0.5604553", "0.5597808", "0.5593959", "0.55883944", "0.55659646", "0.5556419", "0.5555949", "0.55477715", "0.5544017", "0.55352664", "0.54937625", "0.5488379", "0.548671", "0.5481214", "0.54718167", "0.5471706", "0.5471706", "0.5469601", "0.5459588", "0.54527414", "0.54511136", "0.5447573", "0.544068", "0.5435942", "0.5433385", "0.54270124", "0.54228926", "0.5414823", "0.53942406", "0.5394073", "0.53922296", "0.539067", "0.5380052", "0.5380052", "0.5376308", "0.53712875", "0.53673637", "0.5355893", "0.5352155", "0.535131", "0.5336513", "0.5335745", "0.53347886", "0.53219354", "0.5321281", "0.5308199", "0.53035504", "0.53004855", "0.52978647", "0.52944267", "0.52943903", "0.52943105", "0.52789706", "0.527612", "0.5271948", "0.52706665", "0.5265986", "0.5256103", "0.52536064", "0.5250009", "0.52414274", "0.5235558", "0.52337843", "0.52331877", "0.5221115" ]
0.0
-1
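
Each row's metadata lists a single objective of type "triplet" over (query, document, negatives). A minimal sketch of how a row like the one above might be expanded into contrastive training triplets follows; the field names are taken from the dump itself, while the helper function and the truncated strings are purely illustrative assumptions.

def to_triplets(row):
    # One (anchor, positive, negative) triplet per hard negative in the row.
    return [(row["query"], row["document"], negative) for negative in row["negatives"]]

row = {
    "query": "Sample pure Lambda function",
    "document": "def lambda_handler(event, context): ...",  # truncated for illustration
    "negatives": ["def test_lambda(n): ...", "def lambda_method(self, t): ..."],
}
triplets = to_triplets(row)  # two triplets sharing the same anchor and positive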
Checks num for primality. Returns bool.
def isPrime(num):
    if num == 2:
        return True
    elif num < 2 or not num % 2:  # even numbers > 2 not prime
        return False
    # factor can be no larger than the square root of num
    for i in range(3, int(num ** .5 + 1), 2):
        if not num % i:
            return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_prime(self, num):\n if num == 2:\n return True\n if num < 2 or num % 2 == 0:\n return False\n for n in range(3, int(num ** 0.5) + 2, 2):\n if num % n == 0:\n return False\n return True", "def is_prime(num: int) -> bool:\n return factorial(num - 1) % num != 0", "def is_prime(num):\n for i in range(2, num):\n if num % i == 0:\n return False\n return True", "def is_prime(num):\n import math\n\n\n if num % 2 == 0 and num > 2:\n return False\n for i in range(3, int(math.sqrt(num))+1, 2):\n if num % i == 0:\n return False\n return True", "def is_prime(num):\r\n if num == 0 or num == 1:\r\n return False\r\n for i in range(2, num):\r\n if num % i == 0:\r\n return False\r\n else:\r\n return True", "def isPrime(self, num):\n\t\tif num == 2 or num == 3:\n\t\t\treturn True\n\t\tif num % 2 == 0 or num < 2:\n\t\t\treturn False\n\n\t\tfor prime in self.prev:\n\t\t\tif num % prime == 0:\n\t\t\t\treturn False\n\t\treturn True", "def is_prime(num):\n if num == 0 or num == 1:\n return False\n for x in range(2, num):\n if num % x == 0:\n return False\n else:\n return True", "def is_prime(num):\n\n assert num >= 0, \"Num should be a positive integer!\"\n\n if num < 2:\n return False\n\n if num == 2:\n return True\n\n if num % 2 == 0:\n return False\n\n n = 3\n while n * n <= num:\n if num % n == 0:\n return False\n n += 2\n\n return True", "def prime_checker(num):\n\n assert num > 0\n\n if num < 2:\n return False\n\n if num == 2:\n return True\n\n if num % 2 == 0:\n return False\n\n n = 3\n\n while n * n <= num:\n\n if num % n == 0:\n return False\n\n else:\n num += 2\n\n return True", "def is_prime(num):\n if is_even(num) and num != 2 or num == 1:\n return False\n\n for dd in range(3, int(mt.sqrt(num)) + 1):\n if num % dd == 0:\n return False\n\n return True", "def is_prime(num):\n for n in range(2, num):\n if num % n == 0:\n return False\n\n else:\n return True", "def is_prime(num):\n for x in range(2, num + 1):\n if num % x == 0:\n return False\n return True", "def is_prime(num):\n if not isinstance(num, int):\n return False\n if num <= 1:\n return False\n if num == 2 or num == 3:\n return True\n if num % 6 in [0, 2, 3, 4]:\n return False\n div_max = int(math.sqrt(num))\n for div in range(5, div_max + 1, 2):\n if num % div == 0:\n return False\n return True", "def is_prime(num):\n\tif num is 1:\n\t\treturn False\n\tif num % 2 is 0:\n\t\treturn num is 2\n\n\tdivision = 3\n\twhile (division * division) <= num:\n\t\tif num % division is 0:\n\t\t\treturn False\n\t\tdivision += 2\n\treturn True", "def isPrime(num):\r\n if num < 1:\r\n return False\r\n elif num == 2:\r\n return True\r\n else:\r\n for i in range(2, num):\r\n if num % i == 0:\r\n return False\r\n return True", "def isprime(number: int) -> bool:\n for i in range(2, int(number ** 0.5) + 1):\n if number % i == 0:\n return False\n return True", "def is_prime(num):\n if num < 2:\n return False\n\n for i in range(2, num):\n if num % i == 0:\n return True", "def is_prime(num):\n\n if num == 2:\n return True\n for i in range(2, num):\n if num % i == 0:\n return False\n return True", "def is_prime(num):\n\tsquare_root = int(math.ceil(math.sqrt(num)))\n\tfor n in range(2, square_root+1):\n\t\tif num % n == 0:\n\t\t\tif num != n:\n\t\t\t\treturn False\n\n\treturn True", "def is_prime(num):\n if num < 2:\n return False\n elif num == 2:\n return True\n\n for i in range(2, int(num**(1/2))+1):\n if num % i == 0:\n return False\n\n return True", "def is_prime_by_python(num):\n if num == 2:\n return True\n elif num % 2 == 0 or num <= 1:\n # even or 
smaller then one\n return False\n else:\n res = True\n partial_num_range = int(num / 4) + 1\n\n for i in range(1, partial_num_range):\n if num % (2 * i + 1) == 0:\n res = False\n break\n return res", "def isprime(number):\n\n if number == 1:\n return False\n for i in range(2, int(number**0.5) + 1):\n if number % i == 0:\n return False\n return True", "def is_prime_number(number_):\n flag = 0\n for values in range(2, number_//2):\n if number_ % values == 0:\n flag += 1\n if flag == 1:\n return True\n else:\n return False", "def is_prime(n):\n return mr_prime(n)", "def is_prime(n):\r\n if n in (2, 3, 5, 7, 11, 13, 17, 19): return(True)\r\n if (n<=1 or n%2==0 or n%3==0): return(False)\r\n # determine upper limit of test range =>\r\n ulimit = (int(math.ceil(math.sqrt(n)))+1)\r\n return(not any(n%k==0 for k in range(3, ulimit, 2)))", "def prime_checker(num):\n if num <= 0:\n return \"Error: num must be a positive nonzero integer\"\n elif num <= 3:\n return num > 1\n elif num % 2 == 0 or num % 3 == 0:\n return False\n else:\n k = 5\n while k * k < num:\n if (num % k == 0) or (num % (k+2) == 0):\n return False\n k += 6\n return True", "def isprime(n):\r\n\treturn is_prime(n)", "def is_prime(num: int) -> bool:\n if num < 2:\n return False\n low_primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73,\n 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,\n 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251,\n 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443,\n 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557,\n 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,\n 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757,\n 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863,\n 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983,\n 991, 997]\n if num in low_primes:\n return True\n for prime in low_primes:\n if num % prime == 0:\n return False\n return rabin_miller(num)", "def is_prime(n: int) -> bool:\n if n <= 1:\n return False\n\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n return False\n return True", "def isprime(n):\n\treturn is_prime(n)", "def is_prime(num):\n # 2 is prime; exclude\n if num == 2: \n return True\n \n # exclude all other even numbers and numbers less than 2\n if num % 2 == 0 or num < 2:\n return False\n \n # Only need to count up to the the square root of num\n sqrt = int(num ** 0.5 +1) # int rounds down; correct by +1\n \n # Loop through all odd numbers\n for i in range(3, sqrt, 2):\n if num % i == 0:\n return False\n return True", "def is_prime(number: int) -> bool:\n\n if number % 2 == 0 and number > 2:\n return False\n return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))", "def is_prime(number):\n if number <=3:\n return True\n \n for i in range(2, number):\n if number % i == 0:\n return False\n \n return True", "def isprime(n):\n if n % 2 == 0:return False\n return all(n % i for i in range(3, int(n**0.5) + 1, 2))", "def comprobar_primo(num):\n primo = True\n for i in range(2, num):\n if num%i == 0:\n primo = False\n return primo", "def is_prime(x: int) -> bool:\n return not any(x % i == 0 for i in range(2, int(math.sqrt(x)+1)))", "def primenumber(x):\n if x >= 2:\n for y in range(2,x):\n if not (x % y):\n return 
False\n else:\n return False\n return True", "def isprime(n=936):\n if n < 3: return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def if_prime(cls, n):\n\n if (n <= 1):\n return False\n if (n <= 3):\n return True\n\n if (n % 2 == 0 or n % 3 == 0):\n return False\n\n i = 5\n while(i * i <= n):\n if (n % i == 0 or n % (i + 2) == 0):\n return False\n i = i + 6\n\n return True", "def is_prime(number):\n if number == 2:\n return True\n\n if number <= 1 or number % 2 == 0:\n return False\n\n # check to see if number has any odd factors\n for x in range(3, int(number ** 0.5) + 1, 2):\n if number % x == 0:\n return False\n return True", "def is_prime(number):\n if number <= 1:\n return False\n\n max_element = int(math.ceil(math.sqrt(number)))\n # iterate through all elements from 2 through sqrt(n)\n for element in range(2,max_element + 1):\n if number % element == 0:\n return False\n\n return True", "def test_prime(n):\n if SIEVE[n]:\n return True\n else:\n return False", "def is_prime(n):\n global primes\n if n<2:\n return False\n if n==2:\n return True\n for prime in primes:\n if prime>n**0.5+1:\n return True\n if n%prime==0:\n return False\n # For the case that the number is bigger than the square of our largest prime\n for num in range(primes[-1]+2,n**0.5+1,2):\n if n%num==0:\n return False\n return True", "def isprime(n: int) -> bool:\r\n if n > 1:\r\n for i in range(2, int(n / 2) + 1):\r\n if (n % i) == 0:\r\n return False\r\n else:\r\n return True\r\n\r\n else:\r\n return False", "def isprime(n):\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def isPrime(n, primes):\n\n k = math.log(n, 2) # number of bits in n\n r = getRounds(k)\n\n return checks(n, primes, r) # run checks", "def is_prime(n):\n\tb = 2\n\twhile b <= math.sqrt(n):\n\t\tif n % b == 0:\n\t\t\treturn False\n\t\tb += 1\n\treturn True", "def is_prime(number):\n number = int(number)\n\n if number < 2:\n return False\n if number < 4:\n return True\n if number % 2 == 0:\n return False\n for d in range(3, number // 2, 2):\n if number % d == 0:\n return False\n return True", "def is_prime(number: int):\n\n for index in range(2, (number//2) + 1):\n if number%index == 0:\n return False\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n in [2,3]:\n return True\n if n % 2 == 0:\n return False\n\n for factor in range(3, int(math.sqrt(n))+1, 2):\n if n % factor == 0:\n return False\n return True", "def isPrime(n: int) -> bool:\n if n == 1:\n return False\n # handle boundary conditions\n if n == 2 or n == 3:\n return True\n # Now check for divisibility of n by 2 & 3\n if n % 2 == 0 or n % 3 == 0:\n return False\n\n i = 5\n while (i * i <= n):\n if n % i == 0 or n % (i + 2) == 0:\n return False\n\n i = i + 6\n return True", "def is_prime(n):\n for i in range(2,n):\n if n % i == 0:\n return False\n return True", "def is_prime(number):\n #for i in range(2, ceil(sqrt(number))):\n for i in range(2, number):\n if number % i == 0:\n return False\n return True", "def is_prime(number):\n\t\n\tif number < 2: return False\n\telif number == 2: return True\n\telif number % 2 == 0: return False\n\telse:\n\t\tfor x in range(2, number):\n\t\t\tif number % x == 0:\n\t\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def is_prime(n):\n if n <= 1:\n return False\n elif n <= 2:\n return True\n elif n % 2 == 0:\n return False\n else:\n for i in range(3, int(n**.5) + 1, 2):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n if n == 1:\n return False\n else:\n i = 2\n 
while i < n:\n if n % i == 0:\n return False\n i += 1\n return True", "def isprime(n):\n\n if n % 2 == 0:\n return False\n\n # else take square root and iterate over all uneven (step 2) numbers\n sqrt_n = int(math.floor(math.sqrt(n)))\n for i in range(3, sqrt_n + 1, 2):\n if n % i == 0:\n return False\n\n return True", "def is_prime(n):\n\t\n\tif n < 2:\n\t\treturn False\n\t\n\tif not n % 2:\n\t\treturn False\n\t\n\tfor possible_factor in range(3, int(sqrt(n)) + 1, 2):\n\t\tif not n % possible_factor:\n\t\t\treturn False\n\treturn True", "def is_prime(num1):\n num2 = 2\n while num2 < num1:\n if num1 % num2 == 0:\n return False\n num2 += 1\n return True", "def is_prime(n, primes):\n max_factor = math.sqrt(n)\n for p in primes:\n if n % p == 0:\n return False\n if p > max_factor:\n break\n\n return True", "def isPrime(x):\n for i in range(2,int(x**0.5)+1):\n if (x % i == 0):\n return False\n\n return True", "def is_prime(num):\n\n # Quick test for small prime numbers\n if num <= 3:\n return True\n\n # Quick test for even numbers\n # We only need to check up to the square root of n\n for a in range (2, math.sqrt(num)):\n if num % a == 0:\n return False\n \n # Implement an algorithm below to test for primes,\n # e.g. Sieve of Eratosthenes\n # this algorithm just divides by everything\n for i in range(1, int(math.sqrt(num))):\n if num % i == 0:\n return False \n return True\n \n # the code I submitted\n \n def is_prime(num):\n # quick test to test small numbers\n if num <= 3:\n return (True)\n elif num % 2 == 0:\n return (False)\n sqr = int(math.sqrt(num)) + 1\n for a in range(3, sqr, 2):\n if num % a == 0:\n return False\n return True", "def is_prime(self, n):\n if n < self.max_prime:\n return n in self.prime_set\n for p in self.primes:\n if p * p > n:\n break\n if not n % p:\n return False\n return True", "def is_prime(x: int) -> bool:\n if x < 2:\n return False\n if x != 2 and x % 2 == 0:\n return False\n for i in range(3, x // 2 + 1):\n if x % i == 0:\n return False\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n == 2 | n == 3:\n return True\n if n % 2 == 0 | n % 3 == 0:\n return False\n for i in range(2, int(sqrt(n))+1):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n == 2 | n == 3:\n return True\n if n % 2 == 0 | n % 3 == 0:\n return False\n for i in range(2, int(sqrt(n))+1):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n if n <= 1:\n return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def is_prime(number):\n if number == 0 or number == 1:\n return False\n\n isprime = True\n for test in range(2, int(math.sqrt(number) + 1)): # +1 since we have to test up to the square root value\n if number % test == 0:\n isprime = False\n break\n return isprime", "def isPrime(n: int):\n if n <= 1:\n return False\n\n for i in range(2, n-1):\n if n % i == 0:\n # print(\"{} is divisable by {}\".format(n, i))\n return False\n\n return True", "def is_prime(n):\n if n < 1 or n % 1 > 0:\n return False\n if n == 1 or n == 2:\n return True\n for i in range(3, int(math.sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True", "def is_prime(p):\n if p == 1:\n return False\n for n in range(2, int(math.sqrt(p))+1):\n if p % n == 0:\n return False\n return True", "def is_prime(n):\n if n == 2:\n return True\n if n == 0 or n == 1 or n % 2 == 0:\n return False\n for i in range(3, int(math.sqrt(n))+1, 2):\n if n % i == 0:\n return False\n return True", "def is_prime(self):\n pass", "def 
checkPerfectNumber(self, num: int) -> bool:\n if num <= 0:\n return False\n s = 0\n for i in range(1, int(math.sqrt(num) + 1)):\n if i != num:\n res = num % i\n if res == 0:\n s += i\n divisor = num // i\n if divisor != num:\n s += divisor\n if s > num:\n return False\n return s == num", "def is_prime(n: int) -> bool:\n if n <= 3:\n return n > 1\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i ** 2 <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True", "def good_prime(p):\n return p % 4 == 3 and probablyPrime(p, accuracy=100)", "def isPrime(n):\n for i in range (2, n/2+1):\n if n % i == 0:\n return False\n return True", "def is_prime(number):\n\tif number < 0:\n\t\treturn False\n\tif number < 4:\n\t\treturn True\n\t#start with number 2, iterate up until up to half the number is reached\n\tfor x in range(2, int(number/2)+1):\n\t\tif number%x == 0:\n\t\t\treturn False\n\treturn True", "def is_prime(n):\n assert n >= 1, \"n is not a positive integer\"\n k = 2\n if n == 1:\n flag = False\n else:\n flag = True\n while k <= sqrt(n):\n if n % k == 0:\n flag = False\n break\n k += 1\n return flag", "def is_prime(n):\n if n == 2:\n return True\n\n if n < 2 or n % 2 == 0:\n return False\n\n for i in range(3, int(sqrt(n)+1), 2):\n if n % i == 0:\n return False\n\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n == 2 or n == 3:\n return True\n elif n % 2 == 0:\n return False\n else:\n x = 0\n for i in range(3, n, 2):\n if n % i == 0:\n x = 1\n return x == 0", "def is_prime(num):\n\n assert num >= 0, \"Num should be a positive integer!\"\n\n # definition: 0 and 1 are not prime\n if num < 2:\n return False\n\n # definition: 2 is prime\n if num == 2:\n return True\n\n # if it's divisible by 2, it's not prime\n # (We do this as a special case, so that after this we can check\n # only odd numbers -- all even numbers are divisible by 2)\n if num % 2 == 0:\n return False\n\n # see if number is prime -- we'll do this by checking\n # to see if there's any odd number 3 .. sqrt(num)\n # that evenly divides num (why square root? 
think about it!)\n\n n = 3\n\n while n * n <= num:\n if num % n == 0:\n return False\n # Go to next odd number\n n += 2\n\n return True", "def isPrime(n):\r\n # Znamo da 1 nije prost broj\r\n if n == 1:\r\n return False\r\n\r\n i = 2\r\n # Petlja se vrti od 2 do int(sqrt(x)) \r\n while i*i <= n:\r\n # Provjera da li i dijeli x bez ostatka\r\n if n % i == 0:\r\n # To znači da n ima faktor između 2 i sqrt(n)\r\n # Stoga nije prost broj\r\n return False\r\n i += 1\r\n # Ako nismo pronašli nijedan faktor u gornjoj petlji\r\n # onda je n prost broj\r\n return True", "def prime(n: int) -> bool:\n if len(divisors(n)) > 2 or n < 1:\n return False\n else:\n return True", "def is_prime(a):\n return all(a % i for i in xrange(2, a))", "def is_prime(n):\n k = 2\n while n % k != 0:\n k += 1\n if k < n:\n return False\n else:\n return True", "def is_prime(number):\n\tif number < 4:\n\t\treturn True\n\t#start with number 2, iterate up until up to half the number is reached\n\tfor x in range(2, int(number/2)+1):\n\t\tif number%x == 0:\n\t\t\treturn False\n\treturn True", "def is_prime(i, primes):\n is_prime = False\n for prime in list(primes):\n if np.sqrt(i) < prime:\n return not is_prime\n if not (i == prime or i % prime):\n return is_prime\n return not is_prime", "def isprime(x):\n if x <= 1: return False \n if x % 2 == 0: return x == 2\n for k in range(3, int(sqrt(x))+1, 2): \n if x % k == 0: return False\n return True", "def is_prime(n):\n for k in range(2, (n // 2) + 1):\n if n % k == 0:\n return False\n \n return True", "def is_prime(n):\n if n < 2:\n return False\n if n == 2:\n return True\n if (n%2) == 0:\n return False\n for i in range(3,integer_sqrt(n)+1,2):\n if (n%i) == 0:\n return False\n return True", "def isprime(n):\n # make sure n is a positive integer\n n = abs(int(n))\n # 0 and 1 are not primes\n if n < 2:\n return False\n # 2 is the only even prime number\n if n == 2:\n return True\n # all other even numbers are not primes\n if not n & 1:\n return False\n # range starts with 3 and only needs to go up the squareroot of n\n # for all odd numbers\n for x in range(3, int(int(n ** 0.5) ** 0.5) + 1, 2):\n if n % x == 0:\n return False\n return True", "def is_prime(n):\n \n for i in range(3, int(n**0.5+1), 2):\n if n % i == 0:\n print(n,'is not prime')\n return False\n\n print(n,'is prime') \n return True", "def isPrime(n):\n\n if n < 2:\n return False\n elif n in {2,3}:\n return True\n elif n % 2 == 0:\n return False\n else:\n for i in range(3,math.floor(math.sqrt(n))+1,2):\n if n % i == 0:\n return False\n else:\n return True", "def is_prime(n):\n for k in range(2, (n // 2) + 1):\n if n % k == 0:\n return False\n\n return True", "def is_prime(n):\n if n <= 1: return False\n if n <= 3: return True\n\n if (n % 2 == 0 or n % 3 == 0):\n return False\n\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True", "def is_prime(value: int) -> bool:\n\n if value == 1:\n return False\n if value <= 0:\n raise ValueError(\"Value must be greater than zero\")\n\n for i in range(2, int(value**(1/2)) + 1):\n if value % i == 0:\n return False\n return True", "def is_prime(n):\n if n <= 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n\n limit = int(math.floor(math.sqrt(n)))\n i = 5\n while i <= limit:\n if n % i == 0:\n return False\n if n % (i + 2) == 0:\n return False\n i += 6\n return True", "def is_prime(self, it):\n return it > 0 \\\n and (it == 2 or it % 2 != 0) \\\n and 
(it == 1 or not (any(it % number == 0 for number in range(3, it // 2, 2))))", "def is_prime(n):\n i, count = 2, 0\n while i < n:\n if n % i == 0:\n count += 1\n break\n i += 1\n if count == 0 and n != 1:\n return True\n else:\n return False" ]
[ "0.818409", "0.8014647", "0.7901733", "0.7833093", "0.782687", "0.7789034", "0.7774602", "0.77519137", "0.773821", "0.77361023", "0.77142495", "0.77123564", "0.77093136", "0.7696949", "0.76745045", "0.76480395", "0.7635072", "0.760836", "0.7582893", "0.7576612", "0.75485444", "0.7535991", "0.74857146", "0.7464267", "0.74388236", "0.74221736", "0.7421074", "0.7407042", "0.7405216", "0.73967785", "0.73937184", "0.737833", "0.7365658", "0.7319796", "0.7316538", "0.7308377", "0.7301964", "0.727102", "0.7266555", "0.72659206", "0.7264563", "0.72402734", "0.7235498", "0.72211474", "0.7207377", "0.72001654", "0.7198158", "0.7165037", "0.7158024", "0.71560633", "0.7149376", "0.71473104", "0.7141168", "0.7130117", "0.7128326", "0.7107859", "0.7105226", "0.71017784", "0.70943326", "0.70934325", "0.70928913", "0.70927644", "0.7090466", "0.70812756", "0.7080954", "0.7080954", "0.70793736", "0.70756835", "0.70695126", "0.7064088", "0.7064009", "0.70597905", "0.70596856", "0.7051773", "0.7051252", "0.7050751", "0.70502716", "0.7042045", "0.703847", "0.70383567", "0.7010857", "0.70067203", "0.7000577", "0.6999891", "0.69961846", "0.69797087", "0.6976953", "0.6974325", "0.69622135", "0.69550925", "0.6929628", "0.6924351", "0.69225883", "0.69125307", "0.69091284", "0.6908756", "0.69026214", "0.6900302", "0.6897715", "0.6890245" ]
0.7704085
13
Here we define the configuration settings needed for all ingestion plugins with reasonable defaults.
def vdk_configure(self, config_builder: ConfigurationBuilder) -> None: # Plugin-related configurations config_builder.add( key="INGEST_METHOD_DEFAULT", default_value=None, description="Default Ingestion method to be used.", ) config_builder.add( key="INGEST_TARGET_DEFAULT", default_value=None, description="Default Ingestion target to be used.", ) # Configure ingestion specific environment variables ingester_configuration.add_definitions(config_builder=config_builder)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configuration():", "def init_config(self):\n pass", "def configure(self):", "def configure(self):", "def configure(self):", "def configure(self):", "def configure(self):\n\n pass", "def get_default_config(self):\n config = super(SignalfxHandler, self).get_default_config()\n\n config.update({\n 'url': 'https://ingest.signalfx.com/v2/datapoint',\n 'batch': 300,\n # Don't wait more than 10 sec between pushes\n 'batch_max_interval': 10,\n 'auth_token': '',\n })\n\n return config", "def configure(self):\n pass", "def configure(self):\n pass", "def configure(self):\r\n pass", "def default_configs(cls):\n config: dict = super().default_configs()\n\n config.update({\n \"file_ext\": '.txt',\n \"num_sent_per_doc\": -1,\n \"doc_break_str\": None,\n \"column_format\": cls._DEFAULT_FORMAT,\n \"entity_mention_class\": None\n })\n return config", "def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. 
OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")", "def configure(self, options, conf):", "def _configure(self):\n pass", "def config(self):\n pass", "def config(self):\n pass", "def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": \"ft.onto.base_ontology.Document\",\n \"model_name\": \"ktrapeznikov/biobert_v1.1_pubmed_squad_v2\",\n \"question\": \"Where do I live\",\n \"max_answer_len\": 15,\n \"cuda_devices\": -1,\n \"handle_impossible_answer\": False,\n }\n )\n return config", "def __init__(self, cfg):\n super(DKInfluxDB, self).__init__(cfg, 'influxdb')", "def configure(self):\n self.data_batch_file = self.get_value_from_config('data_batch_file')\n self.batch_meta_file = self.get_value_from_config('batch_meta_file')\n self.has_background = self.get_value_from_config('has_background')\n self.num_classes = self.get_value_from_config('num_classes')\n self.converted_images_dir = self.get_value_from_config('converted_images_dir')\n if not self.converted_images_dir:\n self.converted_images_dir = self.data_batch_file.parent / 'converted_images'\n self.convert_images = self.get_value_from_config('convert_images')\n # create directory for storing images if it is necessary\n if self.convert_images and not self.converted_images_dir.exists():\n self.converted_images_dir.mkdir(parents=True)\n self.dataset_meta = self.get_value_from_config('dataset_meta_file')", "def _config_options(self):\n self._config_sortable(self._sortable)\n self._config_drag_cols(self._drag_cols)", "def define_user_config(self) -> None:\n self.add_standard_metadata('infiles')\n\n self.add_custom_metadata(name='key_cols',\n short_name='k',\n required=True,\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='compare_cols',\n short_name='c',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='ignore_cols',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='col_names',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='variables',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='already_sorted',\n action='store_const',\n const=True,\n default=False,\n type=bool)\n self.add_custom_metadata(name='already_uniq',\n action='store_const',\n const=True,\n default=False,\n type=bool)\n self.add_custom_metadata(name='temp_dir',\n default=None,\n type=str)\n self.add_custom_metadata(name='out_dir',\n default=None,\n type=str)\n self.add_custom_metadata(name='assignments',\n default=[],\n type=list)\n\n self.add_standard_metadata('verbosity')\n self.add_all_config_configs()\n self.add_all_csv_configs()\n self.add_all_help_configs()", "def apply_config_defaults():\n\n # don't worry about broken settings, validate_config() will take\n # care of them\n\n if 'pre_action_callbacks' not in nori.cfg:\n nori.cfg['pre_action_callbacks'] = [\n 
(pre_action_drupal_readonly, [], {})\n ]\n\n if 'post_action_callbacks' not in nori.cfg:\n nori.cfg['post_action_callbacks'] = [\n (post_action_drupal_readonly, [], {}, True)\n ]\n\n if 'source_type' not in nori.cfg:\n nori.cfg['source_type'] = 'generic'\n\n if 'source_query_func' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_query_func'] = generic_db_query\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_query_func'] = drupal_db_query\n\n if 'source_query_defaulter' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_query_defaulter'] = (\n apply_generic_arg_defaults\n )\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_query_defaulter'] = None\n\n if 'source_query_validator' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_query_validator'] = validate_generic_args\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_query_validator'] = validate_drupal_args\n\n if 'source_template_change_callbacks' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_template_change_callbacks'] = []\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_template_change_callbacks'] = [\n (drupal_timestamp_callback, [], {})\n ]\n\n if 'source_global_change_callbacks' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_global_change_callbacks'] = []\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_global_change_callbacks'] = [\n (drupal_cache_callback, [], {})\n ]\n\n if 'dest_type' not in nori.cfg:\n nori.cfg['dest_type'] = 'generic'\n\n if 'dest_query_func' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_query_func'] = generic_db_query\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_query_func'] = drupal_db_query\n\n if 'dest_query_defaulter' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_query_defaulter'] = (\n apply_generic_arg_defaults\n )\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_query_defaulter'] = None\n\n if 'dest_query_validator' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_query_validator'] = validate_generic_args\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_query_validator'] = validate_drupal_args\n\n if 'dest_template_change_callbacks' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_template_change_callbacks'] = []\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_template_change_callbacks'] = [\n (drupal_timestamp_callback, [], {})\n ]\n\n if 'dest_global_change_callbacks' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_global_change_callbacks'] = []\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_global_change_callbacks'] = [\n (drupal_cache_callback, [], {})\n ]\n\n if 'templates' not in nori.core.cfg:\n return\n if not isinstance(nori.core.cfg['templates'],\n nori.core.MAIN_SEQUENCE_TYPES):\n return\n\n for i, template in enumerate(nori.core.cfg['templates']):\n if not isinstance(nori.core.cfg['templates'][i],\n nori.core.MAPPING_TYPES):\n continue\n\n if T_MULTIPLE_KEY not in template:\n nori.core.cfg['templates'][i][T_MULTIPLE_KEY] = False\n\n if T_S_QUERY_ARGS_KEY in template:\n args_t = 
template[T_S_QUERY_ARGS_KEY]\n defaulter = nori.core.cfg['source_query_defaulter']\n if (isinstance(args_t, tuple) and len(args_t) >= 2 and\n isinstance(args_t[0], nori.core.MAIN_SEQUENCE_TYPES) and\n isinstance(args_t[1], nori.core.MAPPING_TYPES) and\n defaulter and callable(defaulter)):\n defaulter(args_t[0], args_t[1])\n\n if T_TO_D_FUNC_KEY not in template:\n nori.core.cfg['templates'][i][T_TO_D_FUNC_KEY] = None\n\n if T_S_NO_REPL_KEY not in template:\n nori.core.cfg['templates'][i][T_S_NO_REPL_KEY] = False\n\n if T_S_CHANGE_CB_KEY not in template:\n nori.core.cfg['templates'][i][T_S_CHANGE_CB_KEY] = []\n\n if T_D_QUERY_ARGS_KEY in template:\n args_t = template[T_D_QUERY_ARGS_KEY]\n defaulter = nori.core.cfg['dest_query_defaulter']\n if (isinstance(args_t, tuple) and len(args_t) >= 2 and\n isinstance(args_t[0], nori.core.MAIN_SEQUENCE_TYPES) and\n isinstance(args_t[1], nori.core.MAPPING_TYPES) and\n defaulter and callable(defaulter)):\n defaulter(args_t[0], args_t[1])\n\n if T_TO_S_FUNC_KEY not in template:\n nori.core.cfg['templates'][i][T_TO_S_FUNC_KEY] = None\n\n if T_D_NO_REPL_KEY not in template:\n nori.core.cfg['templates'][i][T_D_NO_REPL_KEY] = False\n\n if T_D_CHANGE_CB_KEY not in template:\n nori.core.cfg['templates'][i][T_D_CHANGE_CB_KEY] = []\n\n if T_KEY_MODE_KEY not in template:\n nori.core.cfg['templates'][i][T_KEY_MODE_KEY] = 'all'\n\n if T_KEY_LIST_KEY not in template:\n nori.core.cfg['templates'][i][T_KEY_LIST_KEY] = []", "def setUpConfig(self):\n pass", "def configure(self) -> None:", "def configs(self):\n raise NotImplementedError()", "def config(\n data_folder=settings.data_folder,\n logs_folder=settings.logs_folder,\n imgs_folder=settings.imgs_folder,\n cache_folder=settings.cache_folder,\n cache_responses=settings.cache_responses,\n log_file=settings.log_file,\n log_console=settings.log_console,\n log_level=settings.log_level,\n log_name=settings.log_name,\n log_filename=settings.log_filename,\n useful_idf_objects=settings.useful_idf_objects,\n default_weight_factor=\"area\",\n ep_version=settings.ep_version,\n debug=settings.debug,\n):\n # set each global variable to the passed-in parameter value\n settings.cache_responses = cache_responses\n settings.cache_folder = Path(cache_folder).expand().makedirs_p()\n settings.data_folder = Path(data_folder).expand().makedirs_p()\n settings.imgs_folder = Path(imgs_folder).expand().makedirs_p()\n settings.logs_folder = Path(logs_folder).expand().makedirs_p()\n settings.log_console = log_console\n settings.log_file = log_file\n settings.log_level = log_level\n settings.log_name = log_name\n settings.log_filename = log_filename\n settings.useful_idf_objects = useful_idf_objects\n settings.zone_weight.set_weigth_attr(default_weight_factor)\n settings.ep_version = ep_version\n settings.debug = debug\n\n # if logging is turned on, log that we are configured\n if settings.log_file or settings.log_console:\n get_logger(name=\"archetypal\")\n log(\"Configured archetypal\")", "def _initConfig(self):\n from tg import config as tg_config\n\n # Set config defaults\n config = DEFAULT_CONFIG.copy()\n temp_verbose = config[\"verbose\"]\n\n # Configuration file overrides defaults\n default_config_file = os.path.abspath(DEFAULT_CONFIG_FILE)\n config_file = tg_config.get('wsgidav.config_path', default_config_file)\n fileConf = self._readConfigFile(config_file, temp_verbose)\n config.update(fileConf)\n\n if not useLxml and config[\"verbose\"] >= 1:\n print(\n \"WARNING: Could not import lxml: using xml instead (slower). 
Consider installing lxml from http://codespeak.net/lxml/.\")\n from wsgidav.dir_browser import WsgiDavDirBrowser\n from tracim.lib.webdav.tracim_http_authenticator import TracimHTTPAuthenticator\n from wsgidav.error_printer import ErrorPrinter\n from tracim.lib.webdav.utils import TracimWsgiDavDebugFilter\n\n config['middleware_stack'] = [\n WsgiDavDirBrowser,\n TracimHTTPAuthenticator,\n ErrorPrinter,\n TracimWsgiDavDebugFilter,\n ]\n\n config['provider_mapping'] = {\n config['root_path']: Provider(\n # TODO: Test to Re enabme archived and deleted\n show_archived=False, # config['show_archived'],\n show_deleted=False, # config['show_deleted'],\n show_history=False, # config['show_history'],\n manage_locks=config['manager_locks']\n )\n }\n\n config['domaincontroller'] = TracimDomainController(presetdomain=None, presetserver=None)\n\n return config", "def config():\n config_django()\n config_svisor()", "def config( **kwargs ):", "def _configure(self):\n path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'config.yml'\n )\n\n with open(path) as file:\n defaultconfig = yaml.load(file)\n\n self.config = merge_dict(self.config, defaultconfig)\n\n if 'logging' in self.config:\n logging.config.dictConfig(self.config['logging'])\n else:\n logging.getLogger('sirbot').setLevel('INFO')", "def config():", "def config():", "def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": None,\n \"attribute\": None,\n \"index_annotation\": None,\n }\n )\n return config", "def config(self):\n raise NotImplementedError", "async def _configure_plugins(self) -> None:\n logger.debug('Configuring plugins')\n funcs = [\n info['plugin'].configure(\n config=info['config'],\n session=self._session,\n router=self.app.router\n )\n for info in self._plugins.values()\n ]\n\n if funcs:\n await asyncio.gather(*funcs, loop=self._loop)\n logger.debug('Plugins configured')", "def get_default_config(self):\n config = super(DropwizardCollector, self).get_default_config()\n config.update({\n 'url': DEFAULT_METRICS_URL,\n 'path': 'dropwizard'\n })\n return config", "def configure(self, conf):\n return", "def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue", "def get_default_config(cls):\n default = super(LSHNearestNeighborIndex, cls).get_default_config()\n\n lf_default = plugin.make_config(get_lsh_functor_impls())\n default['lsh_functor'] = lf_default\n\n di_default = plugin.make_config(get_descriptor_index_impls())\n default['descriptor_index'] = di_default\n\n hi_default = plugin.make_config(get_hash_index_impls())\n default['hash_index'] = hi_default\n default['hash_index_comment'] = \"'hash_index' may also be null to \" \\\n \"default to a linear index built at \" \\\n \"query time.\"\n\n h2u_default = plugin.make_config(get_key_value_store_impls())\n default['hash2uuids_kvstore'] = h2u_default\n\n return default", "def init_config(self):\n super().init_config()\n for param in self.parameters():\n if param.name == 'source':\n continue\n self.add_config_item(param.name,\n 
saver=lambda p=param: getattr(p, \"value\"),\n loader=lambda x, p=param: setattr(p, \"value\", x),\n default=param.default)", "def initialize_from_config(self):", "def default_configs(cls):\n config = super().default_configs()\n config.update({\"model\": \"openie\"})\n return config", "def _config_classes(self):\n pass", "def configure(self):\n if Config().is_edge_server():\n logging.info(\"Configuring edge server #%d as a %s server.\",\n Config().args.id,\n Config().algorithm.type)\n logging.info(\"Training with %s local aggregation rounds.\",\n Config().algorithm.local_rounds)\n\n if hasattr(Config().server, 'do_test'):\n if not Config().clients.do_test or Config().server.do_test:\n datasource = datasources_registry.get()\n self.testset = datasource.get_test_set()\n\n self.load_trainer()\n\n if hasattr(Config(), 'results'):\n result_dir = Config().result_dir\n result_csv_file = f'{result_dir}/result_{Config().args.id}.csv'\n csv_processor.initialize_csv(result_csv_file,\n self.recorded_items, result_dir)\n\n else:\n super().configure()\n\n if hasattr(Config().server, 'do_test'):\n if Config().clients.do_test and Config().server.do_test:\n datasource = datasources_registry.get()\n self.testset = datasource.get_test_set()", "def setdefaults(self):\n self.config = {\n 'dbuser': Infopage.DEFAULT_DBUSER,\n 'dbname': Infopage.DEFAULT_DBNAME,\n 'dbpassword': Infopage.DEFAULT_DBPASSWORD,\n 'dbhost': Infopage.DEFAULT_DBHOST\n }", "def configure(self, options, conf):\n pass", "def configure(cls):\n pass", "def get_default_config(self):\n config = super(UserScriptsCollector, self).get_default_config()\n config.update( {\n 'path': '.',\n 'scripts_path': '/etc/diamond/user_scripts/',\n 'method': 'Threaded',\n } )\n return config", "def get_default_config(self):\n config = super(EndecaDgraphCollector, self).get_default_config()\n config.update({\n 'path': 'endeca.dgraph',\n 'host': 'localhost',\n 'port': 8080,\n 'timeout': 1,\n })\n return config", "def load_config(self):\n pass", "def _init_config_(self):\n self._config= {}", "def setup(self):\n\n default_config = self.read()\n\n self.write(default_config)", "def _setupConfigAnnotation(self):\n annotations = IAnnotations(self)\n settings = annotations.get(\"PLOMINOFIELDCONFIG\", None)\n if not settings:\n annotations[\"PLOMINOFIELDCONFIG\"] = PersistentDict()", "def _load_common_config(self, config: Dict[str, Any]) -> Dict[str, Any] :\n # Log level\n if 'loglevel' in self.args.loglevel:\n config.update({'verbosity': self.args.loglevel})\n else:\n config.update({'verbosity': 0})\n logging.basicConfig(\n level=logging.INFO if config['verbosity'] < 1 else logging.DEBUG,\n format= '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n set_loggers(config['verbosity'])\n logger.info('Verbosity set to %s', config['verbosity'])\n\n # Add dynamic whitelist if found\n if 'dynamic_whitelist' in self.args and self.args.dynamic_whitelist:\n config['pairlist'] = {\n 'method': 'VolumePairList',\n 'config': {'number_assets': self.args.dynamic_whitelist}\n }\n logger.warning(\n 'Parameter --dynamic-whitelist has been deprecated, '\n 'and will be completely replaced by the whitelist dict in the future. '\n 'For now: using dynamically generated whitelist based on VolumePairList. 
'\n '(not applicable with Backtesting and Hyperopt)'\n )\n if self.args.db_url and self.args.db_url != constant.DEFAULT_DB_PROD_URL:\n config.update({'db_url': self.args.db_url})\n logger.info('Parameter --db-url detected ...')\n\n if config.get('dry_run', False):\n logger.info('Dry run is enabled')\n if config.get('db_url') in [None, constant.DEFAULT_DB_PROD_URL]:\n # Default to in-memory db for dry_run if not specified\n config['db_url'] = constant.DEFAULT_DB_DRYRUN_URL\n else:\n if not config.get('db_url', None):\n config['db_url'] = constant.DEFAULT_DB_PROD_URL\n logger.info('Dry run is disabled')\n\n if config.get('forcebuy_enable', False):\n logger.warning('`forcebuy` RPC message enabled.')\n\n # Setting max_open_trades to infinite if -1\n if config.get('max_open_trades') == -1:\n config['max_open_trades'] = float('inf')\n\n logger.info(f'Using DB: \"{config[\"db_url\"]}\"')\n\n # Check if the exchange set by the user is supported\n self.check_exchange(config)\n\n return config", "def configure(self):\n # Every single node produces node stats\n self._init_local_node_stats_publisher()\n\n if self._track_processes:\n # Processes stats are optional\n self._init_local_processes_stats_publisher()\n else:\n self._stub_processes_stats_routes()\n\n if self._is_lb:\n # Load balancer node also provides proxies stats\n self._init_local_proxies_stats_publisher()\n else:\n self._stub_proxies_stats_routes()\n\n if self._is_master:\n # Master collects stats from all nodes and provides API for access\n self._init_cluster_node_stats_publisher()\n if self._track_processes:\n self._init_cluster_processes_stats_publisher()\n self._init_cluster_proxies_stats_publisher()\n else:\n self._stub_cluster_stats_routes()", "def configure(self, args):\n pass", "def setUp(self):\n\n self._hash_bins = 10\n self._embedding_dim = 2\n\n self._default_config = {\n \"hash_bins\": self._hash_bins,\n \"embedding_dim\": self._embedding_dim\n }", "def default_configs(cls):\n return {\n 'redirect_path': None,\n 'nif_page_structure': None,\n 'nif_text_links': None,\n }", "def setUp(self):\n\n self._hash_bins = 10\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n\n self._default_config = {\n \"hash_bins\": self._hash_bins,\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim\n }", "def set_default_configs(self):\n\n raise Exception(\"Child classes must override set_default_configs().\")", "def default_configs(cls):\n config = super().default_configs()\n config['file_ext'] = '.txt'\n return config", "def get_default_config(self):\r\n config = super(CMDCollector, self).get_default_config()\r\n config.update({\r\n 'enabled': 'True',\r\n 'fs': ',',\r\n 'timeout': 300,\r\n })\r\n return config", "def set_missing_defaults(self):\n if 'pub_options' not in self.config:\n self.config['pub_options'] = {\n 'acknowledge': True,\n 'retain': True\n }\n\n if 'sub_options' not in self.config:\n self.config['sub_options'] = {\n 'get_retained': False\n }\n\n if 'subscribed_topics' not in self.config:\n self.config['subscribed_topics'] = None\n\n if 'replay_events' not in self.config:\n self.config['replay_events'] = False\n\n if 'max_reconnect_retries' not in self.config:\n self.config['max_reconnect_retries'] = 10", "def _set_instance_config(self):\n\t\t\n\t\tif \"PARAMETERS_NAME\" in self.config.keys():\n\t\t\tlogger.info(\"You specified your own PARAMETERS_NAME, I will use it.\")\n\t\telse:\n\t\t\tself.config[\"PARAMETERS_NAME\"] = self._get_params_filepath()\n\t\t\n\t\tif \"FILTER_NAME\" in 
self.config.keys():\n\t\t\tlogger.info(\"You specified your own FILTER_NAME, I will use it.\")\n\t\telse:\n\t\t\tself.config[\"FILTER_NAME\"] = self._get_conv_filepath()\n\t\t\n\t\t\n\t\tif \"CATALOG_NAME\" in self.config.keys():\n\t\t\tlogger.warning(\"You specified your own CATALOG_NAME, but I will *NOT* use it !\")\n\t\t\tdel self.config[\"CATALOG_NAME\"]\n\n\t\tif \"PSF_NAME\" in self.config.keys():\n\t\t\tlogger.info(\"You specified your own PSF_NAME, I will use it.\")\n\t\telse:\n\t\t\tself.config[\"PSF_NAME\"] = self._get_psf_filepath()", "def configure(self):\n\n log.info(\"Loading configuration from the database...\")\n settings = dict(db.query(\"\"\"SELECT `key`, `value` FROM settings\"\"\"))\n \n log.info(\"Config loaded\")\n log.info(\"HoN Version: %s Chat Port: %s Protocol: %s\" % (settings['honver'], settings['chatport'], settings['chatver']))\n if 'username' in settings:\n acc_config['username'] = settings['username']\n \n if 'password' in settings:\n acc_config['password'] = settings['password']\n \n if 'invis' in settings:\n settings['invis'] = True if settings['invis'] == \"True\" else False\n \n if 'chatport' in settings:\n settings['chatport'] = int(settings['chatport'])\n \n if 'chatver' in settings:\n settings['chatver'] = int(settings['chatver'])\n \n for key in settings:\n if key in basic_config:\n basic_config[key] = settings[key]\n \n self._configure(chatport=settings['chatport'], protocol=settings['chatver'], invis=settings['invis'],\n masterserver=settings['masterserver'], basicserver=settings['basicserver'], honver=settings['honver'])", "def configure(self):\n inject(self.urls, self.names_for(\"urls\"))\n inject(self.models, self.names_for(\"models\"))\n self.load_admin()", "def _define_settings(self):\n\n self.settings = {}\n\n ##### ORIGINALLY IN THE DOMAIN FILE #######\n\n # Maximum input in the C-Space : no constituent can be more than 100% present\n self.settings['maxInp'] = 1\n\n #### ORIGINALLY IN THE SETTINGS FILE #####\n self.settings[\"epochs\"] = 3 # Training epochs\n self.settings[\"tgtStd\"] = 12e-6\n self.settings['TInit'] = 1e-6\n self.settings[\"TMin\"] = 0\n self.settings[\"TDecayRate\"] = 0.05\n self.settings[\"lambdaInit\"] = 0.011387\n self.settings['lambdaMin'] = 0.0001\n self.settings[\"lambdaDecayRate\"] = 0.60\n self.settings[\"maxSteps\"] = 300000\n self.settings[\"emaSpeedTol\"] = 0.009\n self.settings[\"emaFactor\"] = .005\n self.settings[\"printInterval\"] = 3000\n self.settings[\"summary_file\"] = \"data/summary.txt\"\n mean = torch.ones(self.grammar.bind.nF,\n self.grammar.bind.nR)/self.grammar.bind.nF\n self.settings[\"initStateMean\"] = mean\n self.settings[\"initStateStdev\"] = .025\n self.settings['clamp'] = False\n\n if self.custom_settings is not None:\n for key, value in self.custom_settings.items():\n if key in self.settings:\n self.settings[key] = value", "def __import(self):\n configured_plugins = self.config.get('plugins', {}).copy()\n plugin_module_files = self.__find_plugin_modules(configured_plugins)\n self.__initialize_plugins(configured_plugins, plugin_module_files)", "def initConfiguration():\n UTIL.SYS.s_configuration.setDefaults([\n [\"SYS_COLOR_LOG\", \"1\"],\n [\"HOST\", \"127.0.0.1\"],\n [\"NCTRS_TM_SERVER_PORT\", \"2502\"],\n [\"NCTRS_TM_DU_VERSION\", \"V0\"],\n [\"SPACECRAFT_ID\", \"758\"]])", "def manage_config() -> dict:\n required_args = {\"embedding_size\", \"hidden_size\", \"num_layers\", \"corpus_dir\"}\n arg_groups = {\n \"general\": {\"recoding_type\"},\n \"model\": {\"embedding_size\", 
\"hidden_size\", \"num_layers\", \"dropout\"},\n \"train\": {\"weight_decay\", \"learning_rate\", \"batch_size\", \"num_epochs\", \"clip\", \"print_every\", \"eval_every\",\n \"model_save_path\", \"device\", \"model_name\"},\n \"logging\": {\"log_dir\"},\n \"corpus\": {\"corpus_dir\", \"max_seq_len\"},\n \"recoding\": {\"step_type\", \"num_samples\", \"mc_dropout\", \"prior_scale\", \"hidden_size\", \"weight_decay\",\n \"data_noise\", \"share_anchor\", \"use_cross_entropy\"},\n \"step\": {\"predictor_layers\", \"window_size\", \"step_size\", \"hidden_size\"}\n }\n argparser = init_argparser()\n config_object = ConfigSetup(argparser, required_args, arg_groups)\n config_dict = config_object.config_dict\n\n return config_dict", "def setUp(self):\n self._default_call_inputs = (\n np.array([[1,2,3], [4,5,6]]),\n None\n )\n\n self._hash_bins = 10\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n self._masking = False\n\n self._default_config = {\n \"hash_bins\": self._hash_bins,\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim,\n \"masking\": self._masking\n }", "def configure_step(self):\n\n pass", "def main():\n # TODO. Allow to specify configuration location.\n allConfigs = {\"HOSTS\": {}}\n mainConfig = getConfig(['netdata-grafana-hosts.conf'])\n allConfigs['backend'] = mainConfig.get('global', 'backend')\n allConfigs['grafanaUrl'] = mainConfig.get('global', 'grafanaUrl')\n if allConfigs['backend'] == 'opentsdb':\n allConfigs['opentsdb'] = {\"datasource\": mainConfig.get('opentsdb', 'datasource'),\n \"order\": checkOrderConfig(mainConfig, 'opentsdb'),\n \"skipOthers\": checkSkipOthers(mainConfig, 'opentsdb')}\n allConfigs['opentsdb']['title'] = mainConfig.get('opentsdb', 'title')\n allConfigs['opentsdb']['description'] = mainConfig.get('opentsdb', 'description')\n allConfigs['opentsdb']['dimensionids'] = mainConfig.getboolean('opentsdb', 'dimensionids')\n allConfigs['opentsdb']['prefix'] = mainConfig.get('opentsdb', 'prefix')\n allConfigs['opentsdb']['tags'] = getTags(mainConfig, 'opentsdb')\n allConfigs['opentsdb']['customfilters'] = json.loads(mainConfig.get('opentsdb', 'customfilters'))\n # get customFirstRow and customLastRow\n allConfigs['opentsdb']['customFirstRow'] = getValFromConfig(mainConfig, 'opentsdb', 'customFirstRow')\n allConfigs['opentsdb']['customLastRow'] = getValFromConfig(mainConfig, 'opentsdb', 'customLastRow')\n for sectionName in mainConfig.sections():\n if sectionName in ['global', 'opentsdb']:\n continue\n # check if mandatory options are in place\n if not(mainConfig.has_option(sectionName, 'hostname') and\n mainConfig.get(sectionName, 'hostname')):\n print 'In section %s hostname is not defined. It is mandatory to define full url' % sectionName\n print '* Skipping this node check.'\n continue\n if allConfigs['backend'] == 'graphite':\n if not(mainConfig.has_option(sectionName, 'datasource') and\n mainConfig.get(sectionName, 'datasource')):\n print 'In section %s dataspirce is not defined. It is mandatory to define datasource' % sectionName\n print '* Skipping this node check.'\n continue\n configSuccess, config = getNetdataConfig(mainConfig, sectionName)\n if not configSuccess:\n config['SKIP_NODE'] = False # This is not looked in case of graphite. 
TODO\n config['tags'] = getTags(mainConfig, allConfigs['backend'])\n if allConfigs['backend'] == 'graphite':\n # This is relevant only for graphite\n config['datasource'] = mainConfig.get(sectionName, 'datasource')\n config['order'] = checkOrderConfig(mainConfig, sectionName)\n config['skipOthers'] = checkSkipOthers(mainConfig, sectionName)\n config['hostname'] = mainConfig.get(sectionName, 'hostname')\n config['section'] = sectionName\n # get customFirstRow and customLastRow\n config['customFirstRow'] = getValFromConfig(mainConfig, sectionName, 'customFirstRow')\n config['customLastRow'] = getValFromConfig(mainConfig, sectionName, 'customLastRow')\n allConfigs[\"HOSTS\"][config['hostname']] = config\n print allConfigs\n # Now send allConfigs to a specific backend preparator.\n if allConfigs['backend'] == 'graphite':\n graphiteDashboard(allConfigs)\n elif allConfigs['backend'] == 'opentsdb':\n opentsdbDashboard(allConfigs)\n else:\n print 'Unknown backend type... Exiting'", "def configure_reporting(self, config: ConfigDict):\n\n # Determine logging configuration\n if \"log\" in config:\n self.logflag: bool = config[\"log\"]\n if self.logflag:\n self.itstat_object, self.itstat_insert_func = stats_obj()\n else:\n self.logflag = False\n\n # Determine checkpointing configuration\n if \"workdir\" in config:\n self.workdir: str = config[\"workdir\"]\n else:\n self.workdir = \"./\"\n\n if \"checkpointing\" in config:\n self.checkpointing: bool = config[\"checkpointing\"]\n else:\n self.checkpointing = False\n\n # Determine variable to return at end of training\n if \"return_state\" in config:\n # Returning Flax train state\n self.return_state = config[\"return_state\"]\n else:\n # Return model variables\n self.return_state = False", "def setup_config(self, args=None):\n self.config_parse(args=args)", "def config():\n file_path = None # path to the input file\n db_path = None # path to the output db\n atomic_properties = (\n \"Properties=species:S:1:pos:R:3\"\n ) # atomic properties of the input file\n molecular_properties = [\"energy\"] # molecular properties of the input file\n overwrite = False", "def get_default_config(self):\n config = super(InterruptCollector, self).get_default_config()\n config.update({\n 'path': 'interrupts'\n })\n return config", "def configure(self, *args, **kwargs):\n raise NotImplementedError()", "def configure(self, *args, **kwargs):\n raise NotImplementedError()", "def configure(self, *args, **kwargs):\n raise NotImplementedError()", "def default_config(cls):\n\n config = {\n \"checkpoint_path\": \"\", # path to model checkpoint\n \"separated_audio_folder\": \"\" # path to folder where to save the separated audio tracks.\n }\n return config", "def init_cfg(self):\n # read the config dict\n self.cfg = config_json.cfg_open(self.work_dir)\n # default three sections\n self.cfg.setdefault('param', {})\n self.cfg.setdefault('info', {})\n self.cfg.setdefault('meta', {})", "def _augment_pipeline_cfg(self):", "def _init_config(self):\n self.config = self.config_template.specialize()\n print('MMH CONFIG:\\n' + str(self.config))", "def get_default_config(self):\n config = super(NumaCollector, self).get_default_config()\n config.update(\n {\n \"path\": \"numa\",\n \"bin\": self.find_binary(\"numactl\"),\n }\n )\n\n return config", "def configure(self):\n # Defaults\n self.db_type = DB_TYPE.POSTGRES\n self.db_name = \"ambari\"\n self.db_user = \"ambari\"\n self.db_password = \"bigdata\"\n self.db_host = \"localhost\"\n self.db_url = None\n\n if 
os.path.exists(AMBARI_PROPERTIES_LOCATION):\n self.ambari_props = self.read_conf_file(AMBARI_PROPERTIES_LOCATION)\n\n if \"server.jdbc.database\" in self.ambari_props:\n self.db_type = self.ambari_props[\"server.jdbc.database\"].upper()\n if \"server.jdbc.database_name\" in self.ambari_props:\n self.db_name = self.ambari_props[\"server.jdbc.database_name\"]\n if \"server.jdbc.user.name\" in self.ambari_props:\n self.db_user = self.ambari_props[\"server.jdbc.user.name\"]\n if \"server.jdbc.user.passwd\" in self.ambari_props:\n self.db_password = self.read_file(self.ambari_props[\"server.jdbc.user.passwd\"])\n if \"server.jdbc.hostname\" in self.ambari_props:\n self.db_host = self.ambari_props[\"server.jdbc.hostname\"]\n if \"server.jdbc.url\" in self.ambari_props:\n self.db_url = self.ambari_props[\"server.jdbc.url\"]\n if \"ambari-server.user\" in self.ambari_props:\n self.ambari_server_user = self.ambari_props[\"ambari-server.user\"]\n\n #Logger.info(\"Using database type: {0}, name: {1}, host: {2}\".format(self.db_type, self.db_name, self.db_host))\n connection_string = \"dbname='{0}' user='{1}' host='{2}' password='{3}'\".format(self.db_name, self.db_user, self.db_host, self.db_password)\n\n if self.db_type == DB_TYPE.POSTGRES:\n try:\n import psycopg2 # covered by GNU Lesser General Public License\n except Exception, e:\n Logger.error(\"Need to install python-psycopg2 package for Postgres DB. E.g., yum install python-psycopg2\\n\")\n self.terminate()\n elif self.db_type == DB_TYPE.MYSQL:\n try:\n import pymysql # covered by MIT License\n except Exception, e:\n Logger.error(\"Need to install PyMySQL package for Python. E.g., yum install python-setuptools && easy_install pip && pip install PyMySQL\\n\")\n self.terminate()\n else:\n Logger.error(\"Unknown database type: {0}.\".format(self.db_type))\n self.terminate()\n\n self.conn = None\n self.cursor = None\n try:\n Logger.debug(\"Initializing database connection and cursor.\")\n if self.db_type == DB_TYPE.POSTGRES:\n self.conn = psycopg2.connect(connection_string)\n self.cursor = self.conn.cursor()\n elif self.db_type == DB_TYPE.MYSQL:\n self.conn = pymysql.connect(self.db_host, self.db_user, self.db_password, self.db_name)\n self.cursor = self.conn.cursor()\n\n Logger.debug(\"Created database connection and cursor.\")\n self.cursor.execute(\"SELECT metainfo_key, metainfo_value FROM metainfo WHERE metainfo_key='version';\")\n rows = self.cursor.fetchall()\n if rows and len(rows) == 1:\n self.ambari_version = rows[0][1]\n # Logger.info(\"Connected to database!!! Ambari version is {0}\\n\".format(self.ambari_version))\n\n # Must be Ambari 2.0.0 or higher\n if self.compare_versions(self.ambari_version, MIN_AMBARI_VERSION) < 0:\n Logger.error(\"Must be running Ambari Version {0} or higher.\\n\".format(MIN_AMBARI_VERSION))\n self.terminate()\n else:\n Logger.error(\"Unable to determine Ambari version.\")\n self.terminate()\n\n self.set_cluster()\n except Exception, e:\n Logger.error(\"I am unable to connect to the database. 
Error: {0}\\n\".format(e))\n self.terminate()\n else:\n raise Exception(\"Could not find file {0}\".format(AMBARI_PROPERTIES_LOCATION))", "def __init__(self, config):\n super().__init__(config)\n self.collector_host = config.get(\"collector_host\")\n self.schedds = config.get(\"schedds\", [None])\n self.condor_config = config.get(\"condor_config\")\n self.constraint = config.get(\"constraint\", True)\n self.classad_attrs = config.get(\"classad_attrs\")\n self.correction_map = config.get(\"correction_map\")", "def _setup_pipeline_cfg(self):", "def configure(self, options, config):\n Plugin.configure(self, options, config)\n self.config = config\n if self.enabled:\n self.xunitstream = globalxunitstream\n self.xunitstats = globalxunitstats\n for i in range(4):\n self.xunitstats[i] = 0\n self.xunit_file = options.xunit_file\n self.xunit_header = options.xunit_header", "def defineConfiguration(self):\n def validatePath(value):\n v = olof.tools.validation.parseString(value)\n return v.rstrip().rstrip('/')\n\n options = []\n\n o = olof.configuration.Option('log_directory')\n o.setDescription('Path of the directory where the data will be stored. This can be absolute or relative.')\n o.addValue(olof.configuration.OptionValue('olof/plugins/logger', default=True))\n o.setValidation(validatePath)\n o.addCallback(self.clearScanSetups)\n options.append(o)\n\n o = olof.configuration.Option('enable_lag_logging')\n o.setDescription('Write a separate logfile with detailed timestamps when a detection was registered and ' + \\\n 'received. Useful for analysing connection lag or performance.')\n o.addValue(olof.configuration.OptionValue(False, default=True))\n o.addValue(olof.configuration.OptionValue(True))\n o.addCallback(self.updateLagConfig)\n options.append(o)\n\n return options", "def config(self):\n\n train_dataset = RandomClassificationDataset()\n eval_dataset = RandomClassificationDataset()\n\n return {\n 'model':\n SimpleModel(),\n 'train_dataloader':\n DataLoader(\n dataset=train_dataset,\n batch_size=4,\n sampler=dist.get_sampler(train_dataset),\n ),\n 'eval_dataloader':\n DataLoader(\n dataset=eval_dataset,\n sampler=dist.get_sampler(eval_dataset),\n ),\n 'max_duration':\n '2ep',\n 'autoresume':\n True,\n 'loggers': [],\n }", "def configure_step(self):\n pass", "def _set_config(self):\n\n self.config.data_path = \"http://{0}:{1}/db/data\".format(\n self.config.host,\n self.config.port)\n\n self.config.node_path = \"/\".join([self.config.data_path, \"node\"])\n self.config.headers = dict([])\n self.config.headers[\"get\"] = {\"Accept\": \"application/json\"}\n self.config.headers[\"put\"] = {\"Content-Type\": \"application/json\"}", "def parse_config(self):\n # TODO: parse config file\n pass", "def _set_config():\n\n\tdebug_msg = \"load default config yaml file\"\n\tlogger.debug(debug_msg)\n\n\tconfig_file_parser(paths.CONFIG_FILE, override_options=True)", "def init_configs(self):\n\n # get current location\n self.script_dir = os.path.dirname(__file__)\n\n # load configuration file\n with open(os.path.join(self.script_dir, \"config.json\")) as f:\n self.configs = json.load(f)\n \n # load some configs as attributes\n self.resource_folder = os.path.join(self.script_dir, self.configs[\"resource_path\"], self.resource_type, self.language)\n self.pre_processed_folder = os.path.join(self.resource_folder, self.configs[\"pre_processed_path\"])\n self.results_folder = os.path.join(self.resource_folder, self.configs[\"results_path\"])\n self.chunk_size = 
self.configs[\"resources\"][self.resource_type][\"chunk_size\"]", "def _configure(self):\n dconfig = DConfiguration(self._le2mserv.gestionnaire_graphique.screen)\n if dconfig.exec_():\n pms.TEMPS_PARTIE, pms.TREATMENT, pms.GRILLES = dconfig.get_config()\n self._le2mserv.gestionnaire_graphique.infoserv(\n [trans_TC(u\"Part time: {}\").format(pms.TEMPS_PARTIE),\n trans_TC(u\"Treatment: {}\").format(pms.get_treatment(pms.TREATMENT)),\n trans_TC(u\"Grids: {}\").format(len(pms.GRILLES))])", "def __initConfiguration(self):\n conf = configparser.ConfigParser()\n with open(self.configFile, \"r\") as f:\n conf.readfp(f)\n self.orgConf = conf\n # check additionalSection\n adSection = self.additionalSection\n if adSection in conf:\n adSection = conf[adSection]\n self.conf = {}\n for i in [self.CLIENT_ID, self.CLIENT_SECRET, self.AUTHZ_ENDPOINT,\n self.TOKEN_ENDPOINT, self.REDIRECT_URI, self.SCOPE]:\n if adSection != None and i in adSection:\n self.conf[i] = adSection[i]\n else:\n self.conf[i] = conf[\"DEFAULT\"][i]", "def settings_init(self):\n config_console = configparser.ConfigParser()\n config_console.read(CONFIG_FILE_NAME)\n self.logmode = config_console[\"LOG\"][\"log_mode\"]" ]
[ "0.6318215", "0.6309264", "0.62523276", "0.62523276", "0.62523276", "0.62523276", "0.62494737", "0.62309724", "0.6225098", "0.6225098", "0.6183282", "0.6180916", "0.6179421", "0.6114876", "0.6098103", "0.60826004", "0.60826004", "0.6071773", "0.6046305", "0.60166687", "0.60119224", "0.599236", "0.59749216", "0.5965325", "0.5924659", "0.5921973", "0.59173036", "0.5916264", "0.58941644", "0.5879194", "0.58756626", "0.58734554", "0.58734554", "0.5867089", "0.5863568", "0.5859884", "0.5859758", "0.5847017", "0.582378", "0.58177936", "0.58093876", "0.5801979", "0.57862836", "0.57788515", "0.57708645", "0.57643086", "0.57562953", "0.57455504", "0.57444", "0.5743522", "0.5705567", "0.5702903", "0.56953585", "0.5679244", "0.5662277", "0.5646333", "0.56382406", "0.5637531", "0.5620233", "0.5616249", "0.55967855", "0.5594013", "0.5583551", "0.5557324", "0.5555136", "0.5537025", "0.553451", "0.5521721", "0.5518418", "0.5517622", "0.550818", "0.5504535", "0.5491989", "0.5485359", "0.5482448", "0.5475766", "0.5470979", "0.5470182", "0.54701406", "0.54701406", "0.54701406", "0.54641575", "0.5462325", "0.5449292", "0.54413605", "0.54314935", "0.542787", "0.54213345", "0.54213125", "0.54083055", "0.5407241", "0.5400262", "0.53993285", "0.5391753", "0.5388153", "0.53880847", "0.5382357", "0.53810406", "0.5370741", "0.536956" ]
0.6801265
0
Save a piece of data in the configuration directory.
def save_data(data: str, data_name: str): with open(config_path / data_name, "w") as f: f.write(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self):\n try:\n with open(self._filename, 'w') as conf_file:\n conf_file.write(json.dumps(self._data))\n except OSError:\n _LOGGER.exception(\"Can't store config in %s\", self._filename)", "def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()", "def save(data, section): # save a config\n\tglobal _timesSaved\n\tif dynConfig['logConfigActions']:\n\t\tlogger.info( f'saving {section}: {data}' )\n\t# save\n\tif section != 'placeholderForSaving':\n\t\tcurrentConfigData[section] = data\n\t\tlogger.debug( f'saved {section}' )\n\telse:\n\t\t_timesSaved = 2\n\t# save to disk if this is the third save\n\tif _timesSaved == 0 or _timesSaved == 1:\n\t\t_timesSaved += 1\n\telse:\n\t\t_timesSaved = 0\n\t\ttry:\n\t\t\t# save to disk\n\t\t\twith open( configPath, 'w', encoding='utf-8' ) as file:\n\t\t\t\tjson.dump( currentConfigData, file, indent=4 )\n\t\texcept:\n\t\t\tlogger.error( f'failed to save config to disk!' )\n\t\t\traise ConfigError( 'error while saving the config file' )", "def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)", "def save_to_conf(self):\n raise NotImplementedError", "def save_config(config_path: str, data: dict):\n with open(config_path, 'w') as j:\n dump(data,j)", "def save_to_conf(self):\r\n raise NotImplementedError", "def save(self, config_path):\n raise NotImplementedError()", "def save(self):\n with open(self._config, 'w') as f:\n json.dump(self.data, f, indent=2, sort_keys=True)", "def save():\n\t\ttry:\n\t\t\t#paths\n\t\t\tConfig.conf.set('paths', 'source_dir', Config.source_dir)\n\t\t\tConfig.conf.set('paths', 'lyrics_dir', Config.lyrics_dir)\n\n\t\t\t#actions\n\t\t\tConfig.setBool('actions', 'save_to_file', Config.save_to_file)\n\t\t\tConfig.setBool('actions', 'save_to_tag', Config.save_to_tag)\n\n\t\t\t#sources\n\t\t\tConfig.setBool('sources', 'lyric_wikia', Config.lyric_wikia)\n\t\t\tConfig.setBool('sources', 'musix_match', Config.musix_match)\n\t\t\tConfig.setBool('sources', 'lyricsmode', Config.lyricsmode)\n\t\t\tConfig.setBool('sources', 'az_lyrics', Config.az_lyrics)\n\n\t\t\twith open(Config.config_path, 'w') as configfile:\n\t\t\t\tConfig.conf.write(configfile)\n\t\t\treturn True\n\n\t\t# Catch all config parser errors\n\t\texcept BaseConfigParserError as e:\n\t\t\tprint('Unable to save settings to config.')\n\t\t\tprint(e)\n\t\t\treturn False\n\n\t\t# Catch file handling errors\n\t\texcept IOError as e:\n\t\t\tprint('Unable to save settings to config.')\n\t\t\tprint(e)\n\t\t\treturn False", "def save_configurations(self):\n # Get the file path\n self.data_path = self.data_path_entry.get()\n # Open the file\n with open(self.data_path, 'rb') as file:\n self.log('Opened ' + str(self.data_path))\n # Un-serialize\n info = pickle.load(file)\n # Write the new properties\n self.main_window.overwrite_properties(info)\n\n self.exit()", "def saveConfig(self):\n newPath = self.newFolderPath.text()\n config.set(\"saveLocation\", str(newPath))\n config.save()\n self.reloadSettings()", "def save_data(self):\n # Validate\n try:\n self._data = self._schema(self._data)\n except vol.Invalid as ex:\n _LOGGER.error(\"Can't parse data: %s\",\n humanize_error(self._data, ex))\n\n # Load last valid data\n _LOGGER.warning(\"Reset %s to last version\", self._file)\n self.read_data()\n return\n\n # write\n try:\n write_json_file(self._file, self._data)\n except (OSError, json.JSONDecodeError) as err:\n _LOGGER.error(\"Can't store config in %s: %s\", self._file, err)", "def save(self):\n for p, c in 
self.configs_:\n c.write(p)", "def save_config(self):\n data = json.dumps(self.cfg)\n\n try:\n file = open(self.cfg_file_name, 'w')\n file.write(data)\n except OSError as err:\n print(\"can't save property: {0}\".format(err))\n else:\n file.close()", "def save_config(self):\n config.save_config(self.config, self.config_file)", "def save_config(self):\n if not os.path.exists(self._conf_dir):\n os.makedirs(self._conf_dir)\n conf_file = os.path.join(self._conf_dir, \"dql.json\")\n with open(conf_file, \"w\") as ofile:\n json.dump(self.conf, ofile, indent=2)", "def save():\n\n env.config.save(env.config_file)", "def save(self):\n\t\tself.CONFIG.save()\n\t\tself.temp_files.save()", "def save(self):\n Registry.SetKey(self.CONFIG_NAME, self.config, True)\n self.load() # for validation", "def SaveConfig(self):\n config_value = getattr(self, APPDATA)\n path_value = config_value.AbsolutePaths[0]\n default_cfg_file = os.path.join(path_value, CONFIG_FILE_NAME)\n temp_file = default_cfg_file + '.TEMP'\n if os.path.exists(default_cfg_file):\n json.dump(type(self)._CURRENT_CONFIG,\n open(temp_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)\n EnsureBackup(temp_file, default_cfg_file)\n else:\n if not os.path.isdir(path_value):\n os.mkdir(path_value)\n json.dump(type(self)._CURRENT_CONFIG,\n open(default_cfg_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)", "def save_config_file(self):\n wkdir = Path(self.config_dict[\"outputdir\"])\n config_filename = str(wkdir / f\"{self.config_dict['name']}.json\")\n save_config(self.config_dict, config_filename)", "def save_settings(path, server, station):\n db.save_data(path, server, station)", "def _save_config_log(self, data):\n config_path = os.path.join(self.runtime.working_dir, '.config')\n with open(config_path, 'w') as f:\n yaml.safe_dump(data, f, default_flow_style=False)", "def saveConfig(self):\r\n self.config[\"Settings\"] = {}\r\n settings = self.config[\"Settings\"]\r\n settings[\"datapath\"] = self.dataPath\r\n settings[\"videopath\"] = self.videoPath\r\n settings[\"dataoffset\"] = str(self.dataOffset)\r\n settings[\"colblindmode\"] = str(self.colBlindMode)\r\n with open(self.CONFIG_FILE,\"w\") as file:\r\n self.config.write(file)", "def save_configuration(config):\n with open(cwd + '/configuration.pickle', 'wb') as handle:\n pickle.dump(config, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def save_config(self, *args, **kwargs):\n raise NotImplementedError", "def save(self, data):\n self.write(data)", "def save_config(**kwargs):\n if kwargs == {}:\n kwargs = config._config\n current_config = _load_config()\n current_config.update(**kwargs)\n # write to disk\n fname = _get_config_fname()\n if fname is None:\n raise RuntimeError('config filename could not be determined')\n if not op.isdir(op.dirname(fname)):\n os.mkdir(op.dirname(fname))\n with open(fname, 'w') as fid:\n json.dump(current_config, fid, sort_keys=True, indent=0)", "def save(self) -> None:\n logger.info(\"Saving to config...\")\n yml.save(self._config, self.configpath)", "def _save_to_database(self, data):\n self._logger.info(\"Saving new config to database\")\n\n query1 = \"DELETE FROM project_config WHERE config_site = ?\"\n query2 = \"\"\"INSERT INTO project_config (config_site, config_json)\n VALUES (?, ?)\"\"\"\n\n dump = json.dumps(data)\n with self._bot.localdb as cursor:\n cursor.execute(\"BEGIN\")\n cursor.execute(query1, (self._bot.wikiid,))\n cursor.execute(query2, (self._bot.wikiid, dump))", "def save_config(self, 
path):\n if os.path.isdir(path):\n path = os.path.join(path, 'config.json')\n print('Save config to {}'.format(path))\n with open(path, 'w', encoding='utf-8') as w:\n w.write(json.dumps(self.to_dict(), indent=2,\n sort_keys=True))", "def save_config(self):\n\n if not self.__conf.has_section(self.section):\n self.__conf.add_section(self.section)\n\n for key in self._params:\n val = self._params[key]\n self.__conf.set(self.section, key, val)\n\n with open(self.conf_path, 'w') as f:\n self.__conf.write(f)", "def save(self):\n self.__config.sync()\n self.__saved = True\n Logger().debug(\"Configuration saved\")", "def save(self) -> None:\n self._client.save_config()", "def save_config(self, directory_name=None, filename=None):\n\n dirname_ = \"\" if directory_name is None else directory_name\n if dirname_ != \"\" and not os.path.exists(dirname_):\n os.makedirs(dirname_)\n\n filename_ = self.name if filename is None else filename\n filename_ = dirname_ + \"/\" + filename_ + \".json\"\n data = self.__dict__\n # Delete useless parameters\n del data[\"logger\"]\n del data[\"t_init\"]\n del data[\"t_end\"]\n del data[\"n_iter\"]\n\n JsonUtils.write_file(filename_, data)", "def save(self):\n with open(self._CONFIG_FILE_PATH, 'w') as config_file:\n json.dump(vars(self), config_file)\n return self._CONFIG_FILE_PATH", "def save():\n with open(CONFIG_FILE, 'w') as f:\n json.dump(config, f, indent=4, sort_keys=True)", "def save_config(self):\n with open(self.config_file, 'w') as fout:\n json.dump({'name_dict': self._name_dict, 'metric_dict': self._metric_dict, 'credential_path': self.credential_path, 'path_for_worksheet_name': self.path_for_worksheet_name}, fout)", "def save(self):\r\n if not self.filename:\r\n raise IOError(errors['NoConfigFileYet'])\r\n self.onSave()\r\n stuff = dict()\r\n for thing in ['aliases', 'triggers']:\r\n stuff[thing] = [] # Populate with (args, kwargs) pairs.\r\n if self.config.get('saving', thing):\r\n for c, o in getattr(self, thing).iteritems():\r\n stuff[thing].append(o.serialise())\r\n stuff['variables'] = dict()\r\n if self.config.get('saving', 'variables'):\r\n for v in self.variables:\r\n if hasattr(self, v):\r\n var = getattr(self, v)\r\n if type(var) in self.basicTypes:\r\n stuff['variables'][v] = var\r\n stuff['config'] = self.config.get_dump()\r\n with open(self.filename, 'w') as f:\r\n json.dump(stuff, f, indent = 1, sort_keys = True) # Finally write the completed dictionary.\r", "def saveData(data, file, path='./data/'):\n\twith open(\"{}{}.yml\".format(path, file), 'w') as out:\n\t\tyaml.dump(data, out)", "def save(self, save_dir):\n ProfileManager.save_data_to_disk(self.player_data, path.join(save_dir, self.player_name + '.yaml'))", "def saveSettings(self):\n helpers.saveFile(self.dataDir, self.settingsFilename, json.dumps(self.settings))", "def write_config(self, fname, data):\n with open(fname, 'w') as fhandle:\n fhandle.write(data)", "def save_last_data_location(self, dir_name):\n # TODO think about where to keep the config yaml file\n config_file_name = \"cicada/config/config.yaml\"\n config_dict = None\n if os.path.isfile(config_file_name):\n with open(config_file_name, 'r') as stream:\n config_dict = yaml.safe_load(stream)\n if config_dict is None:\n config_dict = dict()\n config_dict[\"dir_name\"] = dir_name\n with open(config_file_name, 'w') as outfile:\n yaml.dump(config_dict, outfile, default_flow_style=False)", "def save_config(self):\n\n return self.perform_action('/mgmtd/db/save')", "def save(self):\n try:\n self.write(open(self._cfg_path, 'w'))\n 
return True\n except PermissionError as err:\n if err.errno == 13:\n return False\n raise err", "def save(self):\n if self.hasChanged:\n filePath = self.path\n tempPath = filePath+'.tmp'\n fileDir = os.path.split(filePath)[0]\n if not os.path.exists(fileDir): os.makedirs(fileDir)\n cPickle.dump(self.data,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)\n self.hasChanged = False", "def save_to_file(self, name, data):\n if os.path.isdir(\"saved_data\"):\n with open(f'saved_data/{name}.txt', 'wb') as file:\n pickle.dump(data, file)\n else:\n os.mkdir(\"saved_data\")\n self.save_to_file(name, data)", "def save_data(self, filename):\n with open(settings.DIR_PATH + '/' + filename, 'w', encoding='utf-8') as f:\n json.dump(self.data, f, indent=4)", "def saveExitConfig(self):\n newPath = self.newFolderPath.text()\n config.set(\"saveLocation\", str(newPath))\n config.save()\n self.reloadSettings()\n self.close()", "def save():\n print(\"Saving config file..\")\n\n res = yaml.round_trip_dump(_conf, indent=2, block_seq_indent=1)\n\n with open(__config_file, 'w', encoding='utf-8') as stream:\n stream.write(res)", "def save_cfg(self, output_dir):\n output_path = os.path.join(output_dir, 'level_config.cfg')\n shutil.copy(self.cfg_path, output_path)", "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)", "def save(self, path):\n pass", "def save(self, path):\n pass", "def save(self, path):\n pass", "def save_conf(self):\r\n self.sendAndRecv(\"SAVECONF\\r\\n\")", "def save_data(self):\n pass", "def save_conf(self, name=None):\n \n if name:\n filename = name\n \n else:\n filename = \"conf_\" + str(self.conf[\"device\"]) + \"_\" + datetime.today().strftime('%Y-%m-%d') + \".txt\"\n \n path = \"./source/_0_time_series_class/configuration/\"\n filename = path + filename\n \n with open(filename, \"w\") as file:\n json.dump(self.conf, file)", "def save(self):\n # Always write out components in alphabetical order for determinism,\n # especially in tests.\n for function_name in sorted(self._components.keys()):\n self._config_parser[_COMPONENTS_SECTION][\n function_name] = self._components[function_name]\n\n with open(str(self._config_filepath), 'w') as f:\n self._config_parser.write(f)", "def _save_changes(self):\n copy2(self._cfg_filename, self._cfg_filename + \".bak\")\n with open(self._cfg_filename, \"w\", encoding=\"utf-8\") as self._cfg_file:\n self.write(self._cfg_file)", "def save(self):\r\n with open(self.filename, 'w') as f:\r\n if self.pretty:\r\n json.dump(self.__config, f, sort_keys=False,\r\n indent=4, separators=(',', ': '))\r\n else:\r\n json.dump(self.__config, f)", "def save(config, filename=None):\n filename = add_directory(filename or 'configure.json')\n directory = os.path.dirname(filename)\n if not os.path.exists(directory):\n os.makedirs(directory, 0o700)\n with open(filename, \"w\") as f:\n json.dump(config, f, indent=2, sort_keys=True)", "def _save_config(self, data):\n curr_conf = self.config_entry.options.copy()\n curr_conf.update(data)\n curr_conf.update(self._conf_devs_option)\n\n return self.async_create_entry(title=\"\", data=curr_conf)", "def __write_config(self):\n with open(self.config_file, 'w') as data_file:\n config = {\"ibooks_doc_root\":self.ibooks_doc_root,\n \"library_folder\":self.library_folder,\n \"annotation_folder\":self.annotation_folder,\n \"tmp_dir\":self.tmp_dir\n } \n data = json.dumps(config, ensure_ascii=False)\n data_file.write(data)", "def save(self):\n self.backend.save(list(self._d.items()))\n log.debug(\"save: 
{}\".format(self.backend.filename))", "def _save_configuration_to_yml(self):\n data = self.get_configuration_data()\n timestamp = self.model.timestamp\n with open(os.path.join(CHECKPOINTS_DIR, timestamp, 'config_{}.yml'.format(timestamp)), 'w') as outfile:\n yaml.dump(dict(data), outfile, default_flow_style=False)", "def save(self, directory):\n pass # pragma: no cover", "def _save_data(self, filename):\n if not os.path.isdir(os.path.dirname(filename)):\n return False\n with open(filename, 'w') as f:\n f.truncate()\n pickle.dump({\n 'user_data': self.user_data,\n 'api_data': self.api_data,\n 'profiles': self.profiles\n }, f)", "def save_inst(self):\n self.sanity_check()\n self.data_loaded_check()\n\n fname_pub_auth_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_auth_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_top, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_top, '_',\n self.config.experiment_id, '.pk'])\n\n pickle.dump(self.pub_auth_all, open(fname_pub_auth_all, 'wb'))\n pickle.dump(self.pub_auth_top, open(fname_pub_auth_top, 'wb'))\n pickle.dump(self.pub_inst_all, open(fname_pub_inst_all, 'wb'))\n pickle.dump(self.pub_inst_top, open(fname_pub_inst_top, 'wb'))\n\n fname_pub_history = ''.join([self.config.dir_data, '/history_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.history, open(fname_pub_history, 'wb'))\n\n fname_pub_staff = ''.join([self.config.dir_data, '/staff_',\n self.config.experiment_id, '.pk'])\n pickle.dump(self.staff, open(fname_pub_staff, 'wb'))", "def saveConfig(config):\n global SW_CONFIG\n cf = ConfigParser.ConfigParser()\n cf.add_section(\"dir_config\")\n cf.set(\"dir_config\", \"7zpath\", config['7zpath'])\n cf.set(\"dir_config\", \"sharefolder\", config['sharefolder'])\n cf.set(\"dir_config\", \"distpath\", config['distpath'])\n cf.add_section(\"sw_config\")\n cf.set(\"sw_config\", \"version\", config['sw_version'])\n cf.set(\"sw_config\", \"startup\", config['startup'])\n cf.add_section(\"run_config\")\n cf.set(\"run_config\", \"pop\", False)\n cf.set(\"run_config\", \"backup\", False)\n fp = open(CONFIG_FILE, \"w\")\n cf.write(fp)\n fp.close()\n SW_CONFIG = config", "def save(self):\n # Ensure store path exists\n store_path = self.manager.store_path\n if not os.path.exists(store_path):\n os.makedirs(store_path)\n \n # Get filepath\n filename = self._filename\n \n # Write into file\n raw = self.to_json()\n self.service.log.store('Saving %s' % filename)\n f = open(filename, 'w')\n f.write(raw)\n f.close()", "def save_config_file(self):\n with open(self.config_file_name, 'w',encoding='utf-8') as outfile:\n json.dump(self._config, outfile,indent=2)", "def save_config() -> None:\n with open(_config_file, \"w\", newline=\"\") as config_file:\n json.dump(_config, config_file, indent=4)\n config_file.truncate()", "def save(self):\n settings_path = os.path.join(self.file_path, \"__file_data.json\")\n f = open( settings_path, 'w' )\n f.write( simplejson.dumps( self.fileList ) )\n f.close()\n\n settings_path = os.path.join(self.file_path, \"__user_data.json\")\n f = open( settings_path, 'w' )\n f.write( simplejson.dumps( self.userList ) )\n f.close()", "def saveconfigfile(data, dirs=[\"web\", \"config\"]):\n\n filepath = os.path.join(HOME, 
*dirs)\n if not os.path.exists(filepath):\n os.makedirs(filepath)\n\n filename = os.path.join(filepath, \"app.json\")\n\n with open(filename, \"w\") as f:\n json.dump(data, f)", "def save_config(conf, default):\n print()\n if yes_no('Would you like to save your configuration?'):\n name = simple_response(\n 'What would you like to name your configuration?')\n path = ask_path(\n 'Please enter the path you would like your configuration saved to',\n default=default)\n file_path = os.path.join(path, name)\n if file_path.find('.json') == -1:\n file_path += '.json'\n with open(file_path, 'w+') as f:\n json.dump(conf, f, indent=4)", "def save(self, dir):\n raise NotImplementedError", "def save(self):\n #--Data file exists?\n filePath = self.path\n if os.path.exists(filePath):\n ins = open(filePath)\n outData = compat.uncpickle(ins)\n ins.close()\n #--Delete some data?\n for key in self.deleted:\n if key in outData:\n del outData[key]\n else:\n outData = {}\n #--Write touched data\n for key in self.changed:\n outData[key] = self.data[key]\n #--Pickle it\n tempPath = filePath+'.tmp'\n cPickle.dump(outData,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)", "def save_path(path_to_account):\r\n with open(\"config.txt\", 'w+') as write_in_file:\r\n write_in_file.write(path_to_account)", "def saveData(self):\n pass", "def store_data(self) -> Optional[str]:\n try:\n if not self.sector:\n self._logger.warning(\"No sector to store on exit\")\n return\n last_sector_file = Settings.DATA_DIR / \"sectors\" / \".last_sector\"\n self._assert_parent_dir_exists(last_sector_file)\n with open(last_sector_file, \"w+\") as f:\n f.write(self.sector.name + \"\\n\")\n if not self._scenario:\n self._logger.warning(\"No scenario to store\")\n return\n last_scenario_file = Settings.DATA_DIR / \"scenarios\" / \".last_scenario\"\n self._assert_parent_dir_exists(last_scenario_file)\n with open(last_scenario_file, \"w+\") as f:\n f.write(self._scenario.name + \"\\n\")\n except Exception as exc:\n return f\"Error storing data: {exc}\"", "def save(self):\n\n if (self._save != '0'):\n p = self._save+self._path[-3:-1]+'_'+str(self._qn)+'.dat'\n np.savetxt(p, self._gf)\n else:\n sys.exit(\"Wrong path to save\")", "def write_config(self, data):\n logger.debug(\"[%s] Writing config\", self.name)\n self.config.write(data)", "def update(self):\n self.save_config_file()", "def save_config(config):\n with open(os.path.abspath(CONFIG_PATH), 'wb') as config_file:\n pickle.dump(config, config_file)\n return config", "def save(self, path: str):\n pass", "def write(self, path=None):\n\n if not self._path and not path:\n raise ConfigException(\"no config path given\")\n\n if path:\n self._path = path\n\n if \"~\" in self._path:\n self._path = os.path.expanduser(self._path)\n f = open(self._path, \"w\")\n f.write(json.dumps(self._data))\n f.close()", "def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)", "def save():", "def write(self, filename: str):\n obj = self.to_dict(self)\n config.write(obj, filename)", "def saveCurrentConfig():\n cf = 
ConfigParser.ConfigParser()\n cf.add_section(\"dir_config\")\n cf.set(\"dir_config\", \"7zpath\", SW_CONFIG['7zpath'])\n cf.set(\"dir_config\", \"sharefolder\", SW_CONFIG['sharefolder'])\n cf.set(\"dir_config\", \"distpath\", SW_CONFIG['distpath'])\n cf.add_section(\"sw_config\")\n cf.set(\"sw_config\", \"version\", SW_CONFIG['sw_version'])\n cf.set(\"sw_config\", \"startup\", SW_CONFIG['startup'])\n cf.add_section(\"run_config\")\n cf.set(\"run_config\", \"pop\", RUN_CONFIG['pop'])\n cf.set(\"run_config\", \"backup\", RUN_CONFIG['backup'])\n cf.add_section(\"hook_config'\")\n for k, v in HOOK_CONFIG:\n cf.set(\"hook_config\", k, v)\n fp = open(CONFIG_FILE, \"w\")\n cf.write(fp)\n fp.close()", "def save_to_config(self) -> None:\n config_path = os.path.join(self.base_path, \"config.json\")\n\n with open(config_path, \"r\") as _json:\n c_dict = json.load(_json)\n\n c_dict[\"mean_similarity_error\"] = self.ME\n c_dict[\"similarity_correlation\"] = self.pearson_corr\n c_dict[\"similarity_spearman_correlation\"] = self.spearman_corr\n\n with open(config_path, \"w\") as _json:\n json.dump(c_dict, _json)", "def write(self, path):\n with open(path, \"w\") as fh_:\n fh_.write(self.config())" ]
[ "0.78885645", "0.7684677", "0.76479673", "0.76162696", "0.75278085", "0.7526425", "0.7504001", "0.74552506", "0.74546015", "0.72604036", "0.72053385", "0.7191787", "0.71898353", "0.7184517", "0.7153047", "0.7149685", "0.71368116", "0.71217936", "0.7120903", "0.7091909", "0.70070773", "0.69862264", "0.69768053", "0.69314796", "0.6922834", "0.6920655", "0.6898097", "0.68969643", "0.686961", "0.6827954", "0.6814349", "0.6790731", "0.6788581", "0.67870694", "0.67794037", "0.67548937", "0.67462194", "0.67433435", "0.6742296", "0.67407537", "0.6736887", "0.67310554", "0.67262924", "0.6694182", "0.66934305", "0.66700333", "0.6664578", "0.665556", "0.66474265", "0.66339463", "0.6628766", "0.6626798", "0.6622704", "0.66131896", "0.66116977", "0.66116977", "0.66116977", "0.6611187", "0.6610869", "0.66072315", "0.66033673", "0.6600847", "0.65980655", "0.6596788", "0.658724", "0.6586824", "0.6575831", "0.6575751", "0.65690786", "0.65650237", "0.6553635", "0.65463966", "0.65287405", "0.6521689", "0.6520119", "0.6509302", "0.6503174", "0.64901954", "0.6481194", "0.6480922", "0.6480147", "0.64793557", "0.64453053", "0.6434084", "0.6431627", "0.6429259", "0.64290136", "0.64222986", "0.64210397", "0.64205325", "0.6403911", "0.63919556", "0.63916755", "0.6390151", "0.6376735" ]
0.7392408
13
Load a piece of data from the configuration directory.
def load_data(data_name) -> str: with open(config_path / data_name, "r") as f: data = f.readline().strip().split()[0] return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load():\n # get (or create) config path\n p = initialize()\n return load_config(open(p['config']))", "def load_data_from_config(self):\n\n config_file_name = \"cicada/config/config.yaml\"\n config_dict = None\n self.labels = []\n self.to_add_labels = []\n if os.path.isfile(config_file_name):\n with open(config_file_name, 'r') as stream:\n config_dict = yaml.safe_load(stream)\n print(f\"config_dict {config_dict}\")\n if (config_dict is not None) and config_dict.get(\"dir_name\"):\n self.load_data_from_dir(dir_name=config_dict[\"dir_name\"], method='clear')", "def load_from_conf(self):\r\n raise NotImplementedError", "def load_from_conf(self):\n raise NotImplementedError", "def load_data_conf(self):\n data_file = select_file(os.getcwd())\n if data_file is not None:\n self.load_tab(data_file)\n else:\n msg_window('please select valid data config file')", "def _load(self, directory):\n pass", "def load_config(self):\n pass", "def load_config(self):\n if os.path.exists(self.config_file):\n with open(self.config_file) as f:\n conf = json.load(f)\n\n self.update_attributes_from_config(conf)", "def load_data(self, dirname, conf_file=None, loader_cls=PMCTRACKLoader):\n self.sources.append(str(dirname))\n\n # Load configuration\n if conf_file is None:\n try:\n conf_file = list(dirname.glob(\"*.conf\"))[0]\n self.conf = TrackSettings(conf_file)\n except (IndexError, AttributeError):\n msg = (\n \"Track settings file (.conf) in the `dirname` directory\"\n \"is missing or could not be read\"\n )\n warnings.warn(msg, MissingConfWarning)\n\n # Load the tracks\n loader_obj = loader_cls(dirname=dirname)\n self.data = loader_obj()\n self.columns = self.data.columns", "def load_conf(self):\n self._read_uconf()", "def load(self):\n try:\n _config_file = open(self.config, 'r+')\n data = json.loads(_config_file.read())\n except (ValueError, IOError):\n data = {}\n\n self.update(data)", "def testLoadConfiguration(self):\n loader = Loader()\n loader.loadFromDirectory(self.__exampleDirectory)\n\n self.assertEqual(len(loader.taskHolders()), 1)\n\n self.assertEqual(\n os.path.basename(loader.taskHolders()[0].var('contextConfig')),\n 'config.hjson'\n )", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def load_config():\n\t\ttry:\n\t\t\tconf = ConfigParser()\n\n\t\t\tconfig_path = get_config_path()\n\t\t\tconf.read(config_path)\n\n\t\t\t# save references to conf, and config_path in class variables\n\t\t\tConfig.config_path = config_path\n\t\t\tConfig.conf = conf\n\n\t\t\tConfig.source_dir = conf.get('paths', 'source_dir')\n\t\t\tConfig.lyrics_dir = conf.get('paths', 'lyrics_dir')\n\n\t\t\tConfig.save_to_file = conf.getboolean('actions', 'save_to_file')\n\t\t\tConfig.save_to_tag = conf.getboolean('actions', 'save_to_tag')\n\n\t\t\tConfig.overwrite = conf.getboolean('actions', 'overwrite')\n\n\t\t\t# Load all the sources\n\t\t\tConfig.lyric_wikia = conf.getboolean('sources', 'lyric_wikia')\n\t\t\tConfig.musix_match = conf.getboolean('sources', 'musix_match')\n\t\t\tConfig.lyricsmode = conf.getboolean('sources', 'lyricsmode')\n\t\t\tConfig.az_lyrics = conf.getboolean('sources', 'az_lyrics')\n\n\t\t\t# Loading this with user config, we need to call the load_config only once at start.\n\t\t\tConfig.lyric_files_in_dir = 
glob2.glob(os.path.join(Config.lyrics_dir, '**/*.txt'))\n\n\n\t\t# Catch file handling errors\n\t\texcept IOError as e:\n\t\t\tprint('Unable to load config.')\n\t\t\tprint(e)", "def loadConf(self):\n\n with open(self.configFile) as f:\n self.config = json.load(f)", "def load_config(self):\r\n with open('config.json', 'r') as f:\r\n self.config = json.load(f)", "def _loadConfig(self):\n self._packRoot = getattr(sys, \"_MEIPASS\", path.abspath(path.dirname(__file__)))\n rootDir = path.abspath(path.join(self._packRoot, path.pardir))\n logger.debug(\"MOTools root dir is: %s\" % rootDir)\n\n metConf = path.join(rootDir, \"met_config\", \"met_config.json\")\n mainConf = path.join(rootDir, \"main_config.json\")\n userConf = path.join(rootDir, \"user_config.json\")\n\n self._confData = {\n \"MET\": {\"path\": metConf, \"config\": {}, \"loaded\": False},\n \"MAIN\": {\"path\": mainConf, \"config\": {}, \"loaded\": False},\n \"USER\": {\"path\": userConf, \"config\": {}, \"loaded\": False},\n }\n\n for confGroup in self._confData:\n confFile = self._confData[confGroup][\"path\"]\n logger.debug(\"Loading %s config file\" % confGroup)\n if path.isfile(confFile):\n jsonData = {}\n try:\n with open(confFile, mode=\"r\") as inFile:\n jsonData = json.loads(inFile.read())\n if \"config\" in jsonData:\n self._confData[confGroup][\"config\"] = jsonData[\"config\"]\n self._confData[confGroup][\"loaded\"] = True\n except Exception as e:\n logger.error(\"Failed to parse config JSON data.\")\n logger.error(str(e))\n return False\n else:\n logger.debug(\"No file: %s\" % confFile)\n\n # if not self._confData[\"MAIN\"][\"loaded\"]:\n # logger.error(\"Failed to load minimum configuration file main_config.json.\")\n # raise RuntimeError\n\n return", "def load ( self ):\n files = config.get_or_fail ( 'REPO.config_files' )\n for f in files:\n self.load_file ( f )", "def load(file):\n _config.load(file)", "def load(self):\n settings_path = os.path.join(self.file_path, \"__file_data.json\")\n if os.path.exists( settings_path ):\n self.fileList = simplejson.loads( open( settings_path, 'r' ).read() )\n\n settings_path = os.path.join(self.file_path, \"__user_data.json\")\n if os.path.exists( settings_path ):\n self.userList = simplejson.loads( open( settings_path, 'r' ).read() )", "def load_configuration(self, path):\n with open(path) as conf_file:\n if path.name not in self.configuration:\n self.configuration[path.name] = {}\n self.configuration[path.name] = json.load(conf_file)", "def load_config_data(fname, direc=\"data/config/\"):\n path = create_file_path(fname, direc)\n try:\n with open(path, \"rt\") as fp:\n return json.load(fp)\n except (IOError, ValueError):\n raise IOError(\"Failed to open '{}\".format(path))", "def loadConfigs(self):\n self.onLoadConfig(urlopen(self.inipath))", "def load_config(self):\n with open(self.TEMPERATURE_CONFIG_FILE_PATH, 'r') as file:\n self.config = json.load(file)", "def load_config(self):\n conf_file = os.path.join(self._conf_dir, \"dql.json\")\n if not os.path.exists(conf_file):\n return {}\n with open(conf_file, \"r\") as ifile:\n return json.load(ifile)", "def load_config():\n here = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(here, 'config.json')\n with open(config_path, encoding='utf-8') as f:\n return json.load(f)", "def load_data(self) -> None:", "def load(self):\n with sppasPathSettings() as sp:\n config = os.path.join(sp.etc, \"sppas.json\")\n if os.path.exists(config) is False:\n raise OSError(\"No such file or directory: {:s}\".format(config))\n 
else:\n with open(config) as cfg:\n self.__dict__ = json.load(cfg)", "def load_config_raw_data(conf):\n path = Path(conf[\"conf_raw_data\"])\n with open(path) as f:\n txt = f.read()\n conf = json.loads(txt)\n return conf", "def _file_loader(self) -> dict:\n cfg = None\n try:\n with open(self._path) as file:\n cfg = json.loads(file.read())\n except FileNotFoundError as e:\n print(e)\n exit(1)\n return cfg", "def reads(self, data):\n\n self.parser = configparser.ConfigParser()\n ds = io.StringIO(data)\n ds.name = os.path.expanduser(os.path.join('~', RC_FILE))\n self.parser.readfp(ds)", "def __read_config(self):\n with open(self.config_file, 'r') as data_file:\n dict = json.load(data_file)\n self.ibooks_doc_root = dict[\"ibooks_doc_root\"]\n self.library_folder = dict[\"library_folder\"]\n self.annotation_folder = dict[\"annotation_folder\"]\n self.tmp_dir = dict[\"tmp_dir\"]", "def load(path):\n pass", "def test_load_configuration_loads_main_file():\n config.load_configuration(main_configuration_path)\n assert config.get('test.nested.path.value') == 'test value'", "def load_config_file(filename):\n test_data_path = get_config_file_path(filename)\n with open(test_data_path) as f:\n test_data = f.read()\n return test_data", "def loadConfig(self):\r\n self.config.read(self.CONFIG_FILE)\r\n try:\r\n assert \"Settings\" in self.config\r\n except AssertionError:\r\n print(\"Settings do not exist, creating new config file...\")\r\n self.saveConfig()\r\n settings = self.config[\"Settings\"]\r\n self.dataPath = settings.get(\"datapath\",fallback=\"\")\r\n self.videoPath = settings.get(\"videopath\",fallback=\"\")\r\n self.dataOffset = settings.getfloat(\"dataoffset\",fallback=0)\r\n self.colBlindMode = settings.getboolean(\"colblindmode\",False)\r\n if self.videoPath != \"\":\r\n self.loadVideo(self.videoPath,loadAudio=False)\r\n if self.dataPath != \"\":\r\n self.loadData(self.dataPath)", "def _load_config(self):\n\n for p in self._paths:\n if p.exists():\n with p.open() as f:\n c = yaml.safe_load(f)\n if c:\n c['_config_file'] = str(p)\n return c\n else:\n raise ConfigurationError(f\"Didn't find a config file in paths: {self._paths}\")\n\n return {}", "def load(config_file_name=\"network_importer.toml\", config_data=None):\n global SETTINGS\n\n if config_data:\n SETTINGS = _configure_backend(Settings(**config_data))\n return\n\n if os.path.exists(config_file_name):\n config_string = Path(config_file_name).read_text()\n config_tmp = toml.loads(config_string)\n SETTINGS = _configure_backend(Settings(**config_tmp))\n return\n\n SETTINGS = Settings()", "def read_config(self, config_filename):", "def load_data(self):\n try:\n self.manager.load()\n except error:\n show_error_message(title='Initialization error!',\n message='File lords.sdb was not found!')\n else:\n self.update_widgets_values()", "def load_config_file(self):\n\n conf_file = config.DEFAULT_CONFIGURATION_FILE\n\n if self.options and getattr(self.options, \"conf_file\"):\n conf_file = self.options.conf_file\n if (\n not os.path.exists(conf_file) and\n not os.path.exists(\"%s.d\" % conf_file)\n ):\n raise Exception(\n (\n \"The specified configuration file \"\n \"does not exist. 
File=(%s)\"\n ) % self.options.conf_file\n )\n\n self.from_file(conf_file)", "def load_configurations(self):\n path = os.path.join(self.user_directory, \"config\")\n configurations = {\n \"data_connector\": DataConnectorConfiguration,\n \"formats\": FormatsConfiguration,\n \"server\": ServerConfiguration,\n }\n\n for filename, configuration in configurations.items():\n config_path = os.path.join(path, filename + \".yml\")\n configuration = configuration.read_YAML(config_path)\n self.configurations[filename] = configuration", "def load_conf(self, filename):\n\n path = \"./source/_0_time_series_class/configuration/\"\n filename = path + filename\n \n with open(filename) as file:\n self.conf = json.loads(file.read())", "def test_load_from_file(self):\n cf = ConfigFile()\n cf.load_from_file(TestConfigFile.TEST_CONFIG)\n\n self.assertEqual(4, len(cf))\n self.assertEqual(cf[\"key1\"], \"val1\")\n self.assertEqual(cf[\"key2\"], \"val2\")\n self.assertEqual(cf[\"key3\"], \"val3\")\n self.assertEqual(cf[\"key4\"], \"val4\")", "def load(self, path):\n pass", "def load(self, path):\n pass", "def load(filepath):\n with open(filepath) as f:\n return Config(json.load(f))", "def loadseasoning(self):\n stream = open(self.fileref)\n self.config = yaml.safe_load(stream)\n stream.close()", "def load_data(path):\n ns = {}\n execfile(path, ns)\n return ns['data']", "def _load_config():\n\tcfg = configparser.ConfigParser()\n\tcfg.read(os.path.join(get_current_directory(), 'citi.config'))\n\treturn cfg", "def load(cls):\n cls._api_key = \"\"\n cls._token = \"\"\n data = None\n\n try:\n data = literal_eval(cls.config_file.read_text())\n cls._api_key = data[\"key\"]\n cls._token = data[\"token\"]\n except Exception:\n pass\n\n return data", "def loadconfig():\n CONFIG['static_folder'] = str(Path(Path(APP.root_path).parent, 'static'))\n\n for cfile in Path(APP.instance_path).iterdir():\n if cfile.name[-5:] == '.json' and cfile.name != 'config.json':\n name = cfile.name[:-5]\n LOG.debug(\"Loading \" + name)\n with cfile.open() as json_data_file:\n CONFIG[name] = json.load(json_data_file)", "def load(self, filename=None):\n prefix = os.path.dirname(filename)\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n\n name = filename or self.filename\n\n if os.path.exists(name):\n with open(name, 'rb') as dbfile:\n self.data = yaml.safe_load(dbfile) or dict()", "def load(self, root='.'):\n # default config\n self._conf = conf = ConfigParser(interpolation=None)\n conf.read_dict(self.DEFAULT)\n\n # user config\n self._load_config(os.path.join(WSB_USER_DIR, WSB_CONFIG), conf)\n self._load_config(WSB_USER_CONFIG, conf)\n\n # book config\n self._load_config(os.path.join(root, WSB_DIR, WSB_CONFIG), conf)\n\n # map subsections\n self._data = OrderedDict()\n for section in conf.sections():\n sectionobj = OrderedDict()\n m = re.search(r'^(\\S*)(?:\\s*\"([^\"\\]]*)\"\\s*)?$', section)\n if m:\n sec, subsec = m.group(1), m.group(2) or ''\n if sec in self.SUBSECTED:\n self._data.setdefault(sec, OrderedDict())[subsec] = sectionobj\n for key in conf[section]:\n try:\n sectionobj[key] = getattr(conf[section], self.TYPES[sec][None][key])(key)\n except KeyError:\n sectionobj[key] = conf[section][key]\n continue\n self._data[section] = sectionobj\n for key in conf[section]:\n try:\n sectionobj[key] = getattr(conf[section], self.TYPES[section][key])(key)\n except KeyError:\n sectionobj[key] = conf[section][key]", "def load(path: str, config_cls):\n\n return cfg.load(path, config_cls)", "def _load_config():\n\tcfg = 
configparser.ConfigParser()\n\tcfg.read(join(get_current_path(), 'ib.config'))\n\treturn cfg", "def load_from_config(self, **config: Any) -> None:\n for key, filename in config.items():\n self.load(filename, key)", "def load(self):\n with open(self.conf_fname, \"r\") as fd:\n config = json.load(fd)\n \n return config", "def load_config():\n config_file = os.path.dirname(os.path.abspath(__file__)) + '/../config.json'\n with open(config_file, 'r') as f:\n config = json.load(f)\n\n return config", "def _load (cls, *files):\n config = ConfigParser.ConfigParser()\n config.read(files)\n \n metadata = {}\n if config.has_section(\"metadata\"):\n for key in config.options(\"metadata\"):\n metadata[key] = config.get(\"metadata\", key)\n\n processes = {}\n datasources = {}\n for section in config.sections():\n if section == \"metadata\": continue\n if section.startswith(\"process_\"):\n try:\n processes[section[8:]] = FeatureServer.Processing.loadFromSection(config, section)\n except Exception, E:\n pass \n else: \n datasources[section] = cls.loadFromSection(\n config, section, 'DataSource')\n\n return cls(datasources, metadata, processes)", "def load_config():\n global config\n\n with open(\"config.json\") as f:\n json_config = f.read()\n f.close()\n config = json.loads(json_config)", "def load(filename):\n conf = CommonConfig.get()\n conf.update(toml.load(filename))\n return conf", "def load(self, config_instance):\r\n pass", "def load(self):\n if not path.isfile(self.SETTINGS_FILE):\n return\n data = load_json_from_disk(self.SETTINGS_FILE)\n for (key, value) in data.items():\n self.__dict__[key] = value", "def _load_config():\n fname = _get_config_fname()\n if fname is None or not op.isfile(fname):\n return dict()\n with open(fname, 'r') as fid:\n config = json.load(fid)\n return config", "def _load_config_file(self, path: str) -> Dict[str, Any]:\n try:\n with open(path) as file:\n conf = json.load(file)\n except FileNotFoundError:\n raise OperationalException(\n f'Config file \"{path}\" not found!'\n ' Please create a config file or check whether it exists.')\n\n return conf", "def read_configuration (self):\n\t\tself.config.read(self._configfile)", "def load_cfg(self, path):\n if os.path.exists(path):\n self.djs_core = Librarian(path)\n if self.djs_core.load_cfg():\n self.plugins = self.djs_core.debug_info()['plugins']\n tmp = [plug.split(\":\") for plug in self.plugins]\n result = {}\n for lis in tmp:\n if not lis[0] in result:\n result[lis[0]] = []\n result[lis[0]].append(lis[1])\n self.info = dict2table(result)\n print(\"Load done.\\n\")\n else:\n print(\"Configuration file path not found.\\n\")", "def load_configurations() :\n\n local_path = os.path.dirname(os.path.abspath(__file__))\n print(local_path)\n file_path = local_path + os.sep + 'conf.ini'\n parser = configparser.ConfigParser()\n\n if os.path.exists(file_path) :\n config = parser.read(file_path)\n else :\n parser['PATH'] = {}\n parser['PATH']['PATH_TO_DB'] = os.path.expanduser('~/inlusio_data/InlusioDB_Juni_2015.sqlite')\n parser['PATH']['PHYSIO_PATH'] = os.path.expanduser('~/inlusio_data')\n print('Creating new configuration file!!!')\n print('Please fit conf.ini to your local data path!')\n with open(file_path, 'w') as configfile:\n parser.write(configfile)\n\n return parser", "def _load_config_file(self, config_type):\n cloudwatch_config = self.provider_config[\"cloudwatch\"]\n json_config_file_section = cloudwatch_config.get(config_type, {})\n json_config_file_path = json_config_file_section.get(\"config\", {})\n 
json_config_path = os.path.abspath(json_config_file_path)\n with open(json_config_path) as f:\n data = json.load(f)\n return data", "def load_data(self):", "def _load_config_log(self):\n config_path = os.path.join(self.runtime.working_dir, '.config')\n if not os.path.isfile(config_path):\n return {}\n with open(config_path, 'r') as f:\n data = yaml.load(f)\n return data", "def load_from_file(config_path):\n return load_json_file(config_path)", "def get_data(path=None):\n\n # use default path \n if not path:\n path = os.path.relpath(\"config.json\")\n \n try:\n with open(path, mode=\"r\") as f:\n data = json.load(f)\n return data\n except Exception as e:\n print(e)", "def read_config(self, data):\n raise NotImplementedError()", "def load_config(self, path=\"\"):\n if not path:\n if not os.path.isdir(CONFIG_DIR):\n os.makedirs(CONFIG_DIR)\n file_path = QtGui.QFileDialog.getOpenFileName(self,\n \"Open Config\",\n CONFIG_DIR,\n \"Config Files (*.cfg)\")\n else:\n file_path = path\n self._load_state(file_path)\n #self.write_text(\"Loaded config @ {}\".format(file_path))", "def load_config():\n config = ConfigParser()\n config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))\n return config", "def load(self, filename: str = None):\n if not filename:\n filename = 'config.yml'\n\n if (f := self.__home / filename).exists():\n filename = f\n elif (f := Path(filename)).exists():\n filename = f\n else:\n raise FileNotFoundError(f'File {filename} not found')\n\n with filename.open(encoding='utf-8') as f:\n data = yaml.load(f, Loader=ConfigYAMLLoader)\n self.update(data)\n\n self.__home = Path(filename).parent\n return self", "def loadConfig():\n lines = []\n config = {}\n here = path.dirname(__file__)\n fn = path.join(here,'manatee.conf')\n try:\n with codecs.open(fn,'rU','utf-8') as conf:\n lines = conf.readlines()\n conf.close()\n except IOError as e:\n print \" Could not open configuration file: %s\" % e\n\n for line in lines:\n try:\n line = line.strip()\n if line:\n values = [x.strip() for x in line.split('=')]\n config[values[0]] = values[1]\n except Exception as e:\n print \"There was an error in the configuration file: %s\" % e\n # TODO: Any strings from the config file that might be displayed or passed into the SQL server need to be validated here.\n# config = validateConfig(config)\n return config", "def load_config(self, where):\n where = load_object(where, level=3)\n fire_actions(where, tags='tangled.web', args=(self,))", "def load(self, configs, container):\n pass;", "def load_config(path_: str) -> Any:\n path = find_system(path_).path\n if path is None:\n raise ValueError(\"Can't find path {path_!r}\".format(path_=path_))\n loader: Callable[[Any], Any]\n if path.endswith('.yaml'):\n loader = yaml.safe_load\n elif path.endswith('.json'):\n loader = json.load\n else:\n raise ValueError('No known loader for {0}'.format(path))\n with open(path) as file_object:\n return loader(file_object)", "def load( self ):\n ini = codecs.open(self.filename,\"r\",\"utf-8\",errors=\"replace\",buffering=0)\n for l in ini:\n l = l.strip()\n if l:\n (name,value) = l.split(\"=\",1)\n self.conf[name.strip()] = value.strip()\n ini.close()", "def load_config(filename):\n filepaths = []\n for dirpath in os.path.expanduser('~'), os.curdir, '':\n try:\n filepath = os.path.join(dirpath, filename)\n filepaths.append(filepath)\n with open(filepath, 'r') as f:\n return Config(yaml.safe_load(f))\n except IOError:\n pass\n raise IOError('Configuration file not found: ' + ', '.join(filepaths))", "def 
__init__(self):\n self.filename = pathlib.Path(__file__).parent.absolute().__str__() + '/../../data/config.ini'\n self.data = ConfigParser()\n self.data.read(self.filename)", "def test_load_config_safe(self):\n self.__test_load_config_safe(\".scuba.yml\")", "def load_data(self):\n raise NotImplementedError()", "def load_experiment(self):\n load_dir = select_dir(os.getcwd())\n if load_dir is not None:\n if os.path.isfile(os.path.join(load_dir, 'conf', 'config')):\n self.load_main(load_dir)\n else:\n msg_window('missing conf/config file, not experiment directory')\n return\n\n if self.t is None:\n self.t = Tabs(self)\n self.vbox.addWidget(self.t)\n self.t.clear_configs()\n self.t.load_conf(load_dir)\n\n self.set_experiment(True)\n else:\n msg_window('please select valid conf directory')", "def load_conf(self):\n\n self.load_file(self.ini_file)\n self.files = []\n conf_file = open(self.ini_file, \"r\")\n for l in conf_file:\n self.files.append(l.strip())\n conf_file.close()", "def load(self, path: str):\n pass", "def loadConfig(fileName=None):\n if not fileName:\n fileName = Config.userDir + \"config.py\"\n try:\n config = literal_eval( (open(fileName).read()) )\n except Exception,e:\n print(e)\n return\n for c in Config.userConfig:\n if c in config:\n setattr(Config, c, config[c])\n Config.update()", "def test_load(yaml_config_file):\n config = Config()\n config.load(PATH_FILE_CONFIG)\n assert config.backup_root_directory == yaml_config_file.backup\n assert config.docker_compose_wordpress_project_directory == yaml_config_file.docker_compose_wordpress_project", "def load_config(device, filename):\n with open(filename, 'r') as f:\n config_data = json.load(f)\n\n device.send_configuration(config_data)", "def __init__(self):\n # Read configuration into dictionary\n self.directories = general.config_directories()\n self.config = general.read_yaml_files(self.directories)", "def load_conf():\n if os.path.exists(CONF_FILE):\n with open(CONF_FILE, 'r') as infile:\n return json.load(infile)\n else:\n return {}" ]
[ "0.7237997", "0.71998596", "0.70477736", "0.701441", "0.69010216", "0.6879877", "0.6821252", "0.6756391", "0.6728431", "0.66508085", "0.66283035", "0.65852034", "0.6569168", "0.6547239", "0.64764965", "0.6467039", "0.64538944", "0.64244694", "0.64153147", "0.6405894", "0.6381386", "0.63582796", "0.63569576", "0.63364214", "0.63261724", "0.62991476", "0.6256047", "0.62456536", "0.6240982", "0.62219226", "0.62083626", "0.6198483", "0.6197847", "0.6197261", "0.618429", "0.6180445", "0.6170766", "0.61699945", "0.61679626", "0.61665606", "0.6165294", "0.6157825", "0.61507726", "0.61331403", "0.6128744", "0.6128744", "0.612745", "0.61262697", "0.6107761", "0.61013246", "0.60999715", "0.60954964", "0.60886264", "0.6083456", "0.6079077", "0.6078496", "0.60781723", "0.6064951", "0.60437536", "0.6041239", "0.6036118", "0.6026933", "0.6013139", "0.6011873", "0.600755", "0.60040754", "0.59957534", "0.59917754", "0.59887004", "0.5987587", "0.59848195", "0.59812546", "0.59679073", "0.5963523", "0.5955968", "0.5947473", "0.59474427", "0.5943924", "0.5942329", "0.5931311", "0.59301865", "0.59268683", "0.59150136", "0.59140056", "0.5905166", "0.59021264", "0.5899263", "0.58949345", "0.5894419", "0.58787197", "0.5877993", "0.58774173", "0.5873804", "0.58706236", "0.5865912" ]
0.59277076
85
Get a single TW task as an Albert Item.
def get_tw_item(task: taskw.task.Task) -> v0.Item: # type: ignore field = get_as_subtext_field task_id = tw_side.get_task_id(task) actions = [ FuncAction( "Complete task", lambda args_list=["done", task_id]: run_tw_action(args_list), ), FuncAction( "Delete task", lambda args_list=["delete", task_id]: run_tw_action(args_list), ), FuncAction( "Start task", lambda args_list=["start", task_id]: run_tw_action(args_list), ), FuncAction( "Stop task", lambda args_list=["stop", task_id]: run_tw_action(args_list), ), FuncAction( "Edit task interactively", lambda args_list=["edit", task_id]: run_tw_action(args_list, need_pty=True), ), FuncAction( "Fail task", lambda task_id=task_id: fail_task(task_id=task_id), ), ClipAction("Copy task UUID", f"{task_id}"), ] found_urls = url_re.findall(task["description"]) if "annotations" in task.keys(): found_urls.extend(url_re.findall(" ".join(task["annotations"]))) for url in found_urls[-1::-1]: actions.insert(0, UrlAction(f"Open {url}", url)) if reminders_tag_path.is_file(): global reminders_tag reminders_tag = load_data(reminders_tag_path) else: save_data("remindme", str(reminders_tag_path)) actions.append( FuncAction( f"Add to Reminders (+{reminders_tag})", lambda args_list=[ "modify", task_id, f"+{reminders_tag}", ]: run_tw_action(args_list), ) ) actions.append( FuncAction( "Work on next (+next)", lambda args_list=[ "modify", task_id, "+next", ]: run_tw_action(args_list), ) ) urgency_str, icon = urgency_to_visuals(task.get("urgency")) text = task["description"] due = None if "due" in task: due = task["due"].astimezone(dateutil.tz.tzlocal()).strftime("%Y-%m-%d %H:%M:%S") # type: ignore return get_as_item( text=text, subtext="{}{}{}{}{}".format( field(urgency_str), "ID: {}... | ".format(tw_side.get_task_id(task)[:8]), field(task["status"]), field(task.get("tags"), "tags"), field(due, "due"), )[:-2], icon=[str(icon)], completion=f'{curr_trigger}{task["description"]}', actions=actions, urgency=task.get("urgency"), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTask():\n\tcontent = requests.get(MANAGER_URL+\"task\", params={\"apiKey\": API_KEY}).text\n\tif content == \"null\":\n\t\treturn None\n\telse:\n\t\treturn json.loads(content)", "def get_task(task_id):\n return db.task.find_one({'_id': ObjectId(task_id)})", "def get_item(self):\n return self.item", "def get_item(self):\n return self.item", "def get(self, task_id=None):\n if task_id:\n item = self.find(task_id)\n self.queue.remove(item)\n else:\n item = self.queue.get()\n return item", "def get(self, guid):\n key = db.Key.from_path('Task', int(guid))\n task = db.get(key)\n if not task == None:\n guid = \"%s\" % task.key().id_or_name()\n task_json = { \"id\": \"%s\" % guid, \"name\": task.name,\n \"priority\": task.priority, \"effort\": task.effort,\n \"projectId\": task.projectId,\n \"submitterId\": task.submitterId, \"assigneeId\": task.assigneeId,\n \"type\": task.type, \"developmentStatus\": task.developmentStatus,\n \"validation\": task.validation, \"description\": task.description,\n \"createdAt\": task.createdAt,\n \"updatedAt\": task.updatedAt }\n \n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(task_json))\n else:\n self.response.set_status(404, \"Task not found\")", "def getitem(itemID):\n\n return harvest(GET_ITEM_URL, itemID)", "def get_item(self, id: str, user: User) -> Optional[T]:", "def GetItem(self):\r\n \r\n return self._item", "def get_task(self, id):\n\n collection = self._get_collection()\n\n item = collection.find_one({\"_id\": ObjectId(id)})\n\n if item:\n return _mongo_item_to_task(item)\n else:\n return None", "def get(self, guid):\n results = j.sal.fs.find(self._root, '*_%s' % guid)\n if len(results) <= 0:\n raise TaskNotFoundError(\"task %s not found\" % guid)\n if len(results) > 1:\n raise RuntimeError(\"found 2 tasks with same guid, this should not happen\")\n return self._deserialize_task(j.sal.fs.readFile(results[0]))", "def get(self, task_id):\n try:\n return self.dal.task.get_by_id(task_id)\n except EntityNotFound:\n raise DoesNotExist()", "def get_task(self, task_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"tasks\", \"task_id\", task_id)", "def get_task(self, task_id):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks WHERE id=?\", (task_id,))\n return res.fetchone()", "def get_task_by_tid(self, tid):\n return self.task_controller.get_task(tid)", "def get_item(item_id):\n return Item.query.filter_by(id=item_id).first()", "def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError", "def get_item(self):\n raise NotImplementedError", "def get_item(self, name: str) -> Optional[Item]:\n item = self.filter_items(name, limit=1)\n return item[0] if item else None", "def get_by_name(task_name):\n return tasks.find_one({'name': task_name})", "def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)", "def get_task(task_id):\n try:\n return Task.objects.get(id=task_id)\n except ObjectDoesNotExist:\n raise ObjectDoesNotFound(\n 'There is no task with id={}.'.format(task_id))", "def _get_task(self, task):\n try:\n return TASKS[task]\n except KeyError:\n raise ValueError(\"task %s \"\n \"is not supported. 
\" % task)", "def taskdetail_get(td_id):\n return IMPL.taskdetail_get(td_id)", "def get_item(self, call_number):\n return self.item_list.get(call_number)", "def get_task(self, name):\n res = Task()\n self.GetTask(name, res)\n return res", "def getItem(self):\n return self.getItem(0)", "def __getitem__(self, txid: int) -> asyncio.Task:\n return self._tasks[txid]", "def get(self, name, task):\n assert name, \"Must input a valid dataset name.\"\n assert task, \"Must input a valid task name.\"\n self._assert_dataset_exists_in_cache(name)\n self._assert_task_exists_in_dataset_in_cache(name, task)\n return self.manager.data[\"dataset\"][name][\"tasks\"][task]", "def getItem(self) -> Optional[items.Item]:\n return None if self.__itemRef is None else self.__itemRef()", "def get_item(self, index: int) -> _T:\n return self.index_to_item[index]", "def get(self, item):\n if isinstance(item, str):\n item = self.transaction_index[item]\n return self.transaction_list[item]", "def read_item(id: str, request: Request):\n obj = db.get(id, kind=endpoint_model)\n return obj", "def get(self, item_name):\n if isinstance(item_name, BaseItem):\n return item_name\n return self.all_items.get(item_name)", "def get(self, project_id, task_id):\n try:\n task = backend.get(Task, {'project.pk': request.project.pk, 'pk': task_id},\n only=self.export_fields, include=('project',), raw=True)\n except Task.DoesNotExist:\n return {'message': \"unknown task\"}, 404\n return {'task': self.export(task)}, 200", "def getItem(self, itemID, no_html=False):\n data = self._client.Item.find(int(itemID))\n item = self.makeDict(data, no_html=no_html)\n return item", "def task(self) -> base_model.BaseTask:\n return self._task", "def get_item_by_id(self, item_id):\n\n return self.api.items.get(item_id)['item']", "def get(self, id):\n task = get_task(get_db(), id)\n if not task:\n api.abort(404, f\"Invalid task with id: {id}\")\n return task_to_dict(task)", "def get_task(self, id):\n raise NotImplementedError()", "async def get_item(\n request: Request,\n response: Response,\n item_id: int,\n db: SAConnection = Depends(get_postgresql_connection)\n):\n cached_item = await request.app.extra['cache'].get_cache_item(item_id=item_id)\n if cached_item:\n return cached_item\n if db is None:\n response.status_code = 503\n return ResponseModel(result='Service unavailable')\n q = items.select().where(items.c.id == item_id)\n item = await db.fetchrow(query=q)\n if item is not None:\n item = Item(**item)\n await request.app.extra['cache'].set_cache_item(item=item)\n return item\n else:\n response.status_code = 404", "def _mongo_item_to_task(item):\n return Task(\n id=str(item[\"_id\"]),\n task=item[\"task\"],\n args=item[\"args\"],\n kwargs=item[\"kwargs\"],\n wait=item[\"wait\"],\n recurring=item[\"recurring\"],\n when=item[\"when\"],\n )", "def get_task(self, key: str) -> Task:\n raise NotImplementedError", "def get(self, item_id, class_id):\n return get_item_info_with_spell(item_id, class_id)", "def __get_task(self, task_id):\r\n if task_id not in self.__tasks:\r\n self.__tasks[task_id] = Task(task_id)\r\n return self.__tasks[task_id]", "def __get_task(self, task_id):\r\n if task_id not in self.__tasks:\r\n self.__tasks[task_id] = Task(task_id)\r\n return self.__tasks[task_id]", "def get_object(self):\n if not self._item:\n self._item = get_object_or_404(Item, pk=self.kwargs['item_id'])\n return self._item", "def get(self):\n current_user = fjwte.get_current_user()\n return Todo.get_items_by_user_id(current_user.id)", "def first(self) -> Task:\n 
return self._tasks[0]", "def get_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n response = jsonify(content=task['content'])\n response.status_code = 200\n return response", "def get_task(self, u_name):\n raise NotImplementedError()", "def get_item(self, item_type):\n if item_type not in self._internal_type_mapping:\n return None\n else:\n return self._internal_type_mapping[item_type]", "def getTask(self, name):\n for t in self.tasks:\n if isinstance(name, str):\n if t.name == name:\n return t\n else:\n if t.__class__ is name:\n return t\n return None", "def getRawItem(self, itemID):\n data = self._client.Item.find(int(itemID))\n return data", "async def get(self):\n identifier = self.data[\"id\"]\n item = self.core.item_manager.items.get(identifier)\n if not item:\n return self.error(\n ERROR_ITEM_NOT_FOUND,\n f\"No item found with identifier {identifier}\", status_code=404)\n\n return self.json(data=list(item.actions.keys()))", "def workflow_fetch_item_task_spec(dtool_smb_config):\n return {\n 'item_id': {'key': 'search_dict_task->result'},\n 'source': 'smb://test-share/1a1f9fad-8589-413e-9602-5bbd66bfe675',\n 'filename': 'fetched_item.txt',\n 'dtool_config': dtool_smb_config,\n 'stored_data': True,\n }", "def find(self, task_id):\n for task_obj in self._blocked_items:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in limbo: '{}'\".format(task_id))", "def get_item(self, item: Inventory) -> Optional[OrderItem]:\n return self.orderitem_set.filter(item=item).first()", "def get_item_from_modulestore(usage_key, draft=False):\r\n store = modulestore('draft') if draft else modulestore('direct')\r\n return store.get_item(usage_key)", "async def get_task_result(task_id: TaskId):", "def _task_info_get(context, task_id, session=None):\n session = session or get_session()\n query = session.query(models.TaskInfo)\n query = query.filter_by(task_id=task_id)\n try:\n task_info_ref = query.one()\n except sa_orm.exc.NoResultFound:\n LOG.debug(\"TaskInfo was not found for task with id %(task_id)s\",\n {'task_id': task_id})\n task_info_ref = None\n\n return task_info_ref", "def _retrieve(cls, connection, uuid):\n resp = connection._get(get_url('task update', uuid=uuid))\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n raise_on_error(resp)\n return Task.from_json(connection, resp.json())", "def get_task_user():\n from olympia.users.models import UserProfile\n\n return UserProfile.objects.get(pk=settings.TASK_USER_ID)", "def read_item(\n db: Session = Depends(deps.get_db),\n item: models.Item = Depends(deps.get_owned_item_by_id),\n current_user: schemas.UserInDB = Depends(deps.get_current_active_user),\n) -> Any:\n return item", "def get_task_by_id(task_id):\n result = mongo.db.tasks.find({\"_id\": ObjectId(task_id)})\n return json_util.dumps(result)", "def get(self) -> Task: # pragma: no cover\n raise NotImplementedError", "def task(self, name):\n with self.db_lock:\n return self.rcon.hget(self.task_key, name)", "def _task_get(context, task_id, session=None, force_show_deleted=False):\n session = session or get_session()\n query = session.query(models.Task).options(\n sa_orm.joinedload(models.Task.info)\n ).filter_by(id=task_id)\n\n if not force_show_deleted and not context.can_see_deleted:\n query = query.filter_by(deleted=False)\n try:\n task_ref = query.one()\n except sa_orm.exc.NoResultFound:\n LOG.debug(\"No task found 
with ID %s\", task_id)\n raise exception.TaskNotFound(task_id=task_id)\n\n # Make sure the task is visible\n if not _is_task_visible(context, task_ref):\n msg = \"Forbidding request, task %s is not visible\" % task_id\n LOG.debug(msg)\n raise exception.Forbidden(msg)\n\n return task_ref", "def db_get_task(task_id):\n sql = \"SELECT * FROM {} WHERE id=?\".format(TABLE_NAME)\n return db_query(sql, (task_id,), True)", "def get_item(\n self, id_: Union[UUID, str], full_dataset: bool = True\n ) -> Optional[DatasetItem]:\n items = list(\n self.search_items(\n dataset_ids=[id_], full_dataset=full_dataset, order=ItemSort.UNSORTED\n )\n )\n if not items:\n return None\n if len(items) > 1:\n raise RuntimeError(\n \"Something is wrong: Multiple dataset results for a single UUID\"\n )\n\n [item] = items\n return item", "def get_item(self, index):\n if index == 0:\n raise IndexError(\"<{0}> Index start as 1\".format(type(self).__name__))\n index = self.get_index(index)\n res = self.get_item_type()()\n self.get_Item(index, res)\n return res", "def getItemData(itemId):\n return Gw2Spidy._request('item', str(itemId))['result']", "def get_task(self):\n return self.queue.get()", "def task(self, name):\n if name not in self._tasks:\n raise TaskNotFoundError\n\n return self._tasks[name]", "def get_task_by_id(self, task_id):\n task_record = self._read_transaction(tx.get_task_by_id, task_id=task_id)\n tuples = self._get_task_data_tuples([task_record])\n return _reconstruct_task(tuples[0][0], tuples[0][1], tuples[0][2], tuples[0][3],\n tuples[0][4])", "def get(self, itemId):\n\n tableRow = self.__queryTableRow(itemId)\n return self.__getItemFromTableRow(tableRow)", "def get_orderItem(self, itemId):\n for o in self.order_lst:\n return o.get_item(itemId)", "async def get_one(self, where):\n\n pass", "def read_item(\n *,\n db: Session = Depends(deps.get_db),\n id: int,\n current_user: models.User = Depends(deps.get_current_active_user),\n) -> Any:\n item = crud.item.get(db=db, id=id)\n if not item:\n raise HTTPException(status_code=404, detail='Item not found')\n if not crud.user.is_superuser(current_user) and (item.owner_id != current_user.id):\n raise HTTPException(status_code=400, detail='Not enough permissions')\n return item", "def get_task_by_id(id):\n\n\t# Open connection and execute SQL to get a task\n\ttry:\n\t\tdb, cursor = connect()\n\t\t\n\t\tcursor.execute(\"\"\"SELECT * FROM tasks \n\t\t\t\t\t\tWHERE id=%s\"\"\" % id)\n\n\t\ttask = cursor.fetchone()\n\n\t# Get error messages\n\texcept catch_error(), e:\n\t\tprint \"Error %d: %s\" % (e.args[0],e.args[1])\n\n\t# Close connection\n\tfinally:\n\t\tif db:\n\t\t\tdb.close()\n\n\treturn task", "def get_target(self):\n task = self.task.get_task(self.task_id)\n if 'name' in task:\n return str(task['name'])\n return str(task)", "async def get_one(self, pk):\n\n return await self._expand(await self.db.get_one(pk=pk))", "def get_item(self, usage_key, depth=0):\r\n store = self._get_modulestore_for_courseid(usage_key.course_key)\r\n return store.get_item(usage_key, depth)", "def get_item(self, item_id):\n for item in self.order_items:\n if item.get_itemId() == item_id:\n return item", "def get(self):\n gid = self.get_query_argument('gid', None)\n\n if gid: # get a specified task\n self.write(update_fields(\n self._rpc.aria2.tellStatus(self._token, gid, TASK_FIELDS)))\n\n else: # get all tasks\n active_tasks = self._rpc.aria2.tellActive(self._token, TASK_FIELDS)\n waiting_tasks = self._rpc.aria2.tellWaiting(\n self._token, -1, 100, TASK_FIELDS)\n stopped_tasks = 
self._rpc.aria2.tellStopped(\n self._token, -1, 100, TASK_FIELDS)\n all_tasks = [\n update_fields(task) for task in\n itertools.chain(active_tasks, waiting_tasks, stopped_tasks)\n ]\n self.write({'tasks': all_tasks})", "def get_item(self, item_id):\n test_info = db.get_test(item_id)\n if not test_info:\n pecan.abort(404)\n test_list = db.get_test_results(item_id)\n test_name_list = [test_dict[0] for test_dict in test_list]\n return {\"cpid\": test_info.cpid,\n \"created_at\": test_info.created_at,\n \"duration_seconds\": test_info.duration_seconds,\n \"results\": test_name_list}", "def task_get(context, task_id, session=None, force_show_deleted=False):\n task_ref = _task_get(context, task_id, session=session,\n force_show_deleted=force_show_deleted)\n return _task_format(task_ref, task_ref.info)", "def find_by_task(self, task, params={}, **options):\n path = \"/tasks/%s/attachments\" % (task)\n return self.client.get_collection(path, params, **options)", "def task_item(self, grab, task):\n if self.file_exist(self.get_id(grab.doc.url)) \\\n and not config['rewrite_files']:\n logging.info(\"Item will not parse since file exists: %s.%s page:%s\"\n % (self.get_id(grab.doc.url),\n self.type_file,\n task.page)\n )\n return\n\n logging.debug(\"Begining item parsing: %s\" % grab.doc.url)\n json_info = {}\n realtime_found = None\n try:\n realtime_found = grab.doc.rex_text(\"'boatBanner'\\s*:\\s*'(.*?)',\")\n except DataNotFound:\n logging.warning(\n \"Repeat... 'boatBanner' for realtimeavibility not found in: %s\"\n % grab.doc.url\n )\n # Repeat task\n yield Task('item', url=grab.config['url'],\n task_try_count=task.task_try_count + 1)\n\n data_boat = grab.doc.select('//span[@class=\"wishlist-btn ' +\n 'js-wishlist-toggle boatview__wishlist\"]')\n try:\n json_info = json.loads(data_boat.attr('data-boat'))\n except json.decoder.JSONDecodeError:\n logging.warning(\"Json decode error for data-boat in: %s\"\n % grab.doc.url)\n # Repeat task\n yield Task('item', url=grab.config['url'],\n task_try_count=task.task_try_count + 1)\n except IndexError:\n logging.warning(\"span js-wishlist-toggle... 
not found in: %s\"\n % grab.doc.url)\n # Repeat task\n yield Task('item', url=grab.config['url'],\n task_try_count=task.task_try_count + 1)\n\n if len(json_info) < 1 or realtime_found is None:\n return\n\n realtime = True if realtime_found == 'realtime' else False\n info = OrderedDict()\n info['url'] = grab.doc.url\n info['title'] = self.get_title(grab)\n info['parsingdate'] = datetime.now().strftime('%H:%M %d/%m/%y')\n info['realtimeavilbility'] = realtime\n\n location = json_info['location']\n info['location'] = OrderedDict([\n ('country', location.split(', ')[0]),\n ('city', location.split(', ')[1])\n ])\n\n data = OrderedDict(info)\n\n data['year'] = self.get_year(grab)\n data['length'] = json_info['length'].replace(' ', '')\n\n guests = self.get_guests(grab, json_info)\n if guests is not None:\n data['guests'] = int(guests)\n data['type'] = grab.doc.rex_text(\"'type': '(.+?)',\")\n\n engine_value = self.get_engine(grab)\n if engine_value is not None:\n data['engine'] = engine_value\n\n sleeps = self.get_sleeps(grab)\n if sleeps is not None:\n data['sleeps'] = sleeps\n\n cabins = self.get_cabins(grab, json_info)\n if cabins is not None:\n data['cabins'] = cabins\n\n bathrooms = self.find_boatview__stats(grab, 'Bathrooms')\n if bathrooms is not None:\n data['bathrooms'] = int(bathrooms)\n else:\n logging.debug(\"Bathrooms for 'bathrooms' not found in: %s\"\n % grab.doc.url)\n\n about = self.get_about(grab)\n if about is None:\n logging.debug(\"About for 'about' not found in: %s\"\n % grab.doc.url)\n data['about'] = about if about is not None else ''\n data['photos'] = self.get_images_urls(grab)\n\n inventory = self.get_inventory(grab)\n if inventory is not None:\n data['inventory'] = inventory\n\n data['pickup'] = self.get_pickup(grab)\n\n equipment = self.get_equipment(grab)\n if len(equipment) < 1:\n logging.debug(\"equipment not found in: %s\"\n % grab.doc.url)\n else:\n data['equipment'] = equipment\n\n prices = self.get_prices(grab, 'Obligatory extras')\n optional = self.get_prices(grab, 'Optional extras')\n if prices is not None:\n data['prices'] = OrderedDict([\n ('obligatory', prices),\n ])\n if optional is not None:\n data['optional'] = optional\n\n if self.file_exist(self.get_id(grab.doc.url)) \\\n and not config['rewrite_files']:\n logging.info(\"Item will not save since file exists: %s.%s\"\n % (self.get_id(grab.doc.url), self.type_file)\n )\n return\n\n # If elements more than 10 then save results into json-format\n if len(data) > 9:\n logging.debug(\"Saving url: %s from page: %s\"\n % (grab.doc.url, task.page))\n self.save_result(\n self.get_id(grab.doc.url),\n json.dumps(data, ensure_ascii=False, indent=2)\n )\n else:\n logging.info(\n \"Data hasn't been saved. 
It contains less 10 objects: %s.%s\"\n % (self.get_id(grab.doc.url), self.type_file)\n )\n # Repeat task\n yield Task('item', url=grab.config['url'],\n task_try_count=task.task_try_count + 1)", "def __getitem__(self, item: str) -> Account:\n return self.accounts[item]", "def item(self, item_id):\n response = self._request(V2_ENDPOINTS['ITEMS'] + item_id)\n return response", "def get_item_detail(item_id):\n pass", "def get_task(id):\n\n task = mycelery.AsyncResult(id)\n\n if task and task.info:\n return jsonify({\n 'id': task.id,\n 'name': task.info['name'],\n 'total': task.info['total'],\n 'current': task.info['current'],\n 'complete': task.info['complete'],\n 'errors': task.info['errors'],\n 'errors_count': task.info['errors_count'],\n 'status': task.info['status']\n })\n\n return 'Não existem dados', 400", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def get_task(self,\n task_label=None,\n notebook_cell_text=None,\n print_return=True):\n\n self._print('Getting task {} ...'.format(task_label))\n\n if task_label:\n task = {task_label: self._tasks[task_label]}\n\n elif notebook_cell_text:\n task = self._load_task_from_notebook_cell(notebook_cell_text)\n\n else:\n raise ValueError(\n 'Get an existing task by querying for its ID or register a '\n 'task from a notebook cell.')\n\n if print_return: # For communicating with JavaScript\n print(dumps(task))\n return task", "def item(item_id):\n kwargs = {k: parse(v) for k, v in request.args.to_dict().items()}\n\n try:\n trading = Trading(**kwargs)\n except ConnectionError as err:\n result = str(err)\n status = 500\n else:\n response = trading.get_item(item_id)\n result = response['Item']\n status = 200\n\n return jsonify(status, objects=result)" ]
[ "0.6219642", "0.6196074", "0.6165974", "0.6165974", "0.61451584", "0.61449784", "0.61061776", "0.6070151", "0.6032105", "0.6015247", "0.6007213", "0.59836924", "0.59652644", "0.59573513", "0.59348106", "0.5902991", "0.5900254", "0.58807874", "0.58392173", "0.58383256", "0.5837305", "0.580728", "0.57932675", "0.5767161", "0.5763113", "0.5761524", "0.5744938", "0.5731988", "0.5706495", "0.56801385", "0.56754774", "0.5659776", "0.56520575", "0.56404376", "0.5637687", "0.56248206", "0.5589257", "0.5583347", "0.55680543", "0.55665743", "0.5565227", "0.55622965", "0.5558775", "0.5543443", "0.5536009", "0.5536009", "0.55261904", "0.55215734", "0.55192345", "0.5504938", "0.54972804", "0.5479369", "0.5465672", "0.5464379", "0.54623103", "0.54215264", "0.5418197", "0.5416376", "0.53923225", "0.538743", "0.53792137", "0.5377246", "0.5375941", "0.5370428", "0.53592414", "0.5349529", "0.5344184", "0.53371507", "0.5329789", "0.53260833", "0.5324724", "0.5321135", "0.5311958", "0.53013617", "0.5300261", "0.52865875", "0.52730125", "0.52697194", "0.52687764", "0.52639973", "0.52618086", "0.5259553", "0.525004", "0.5240394", "0.5238838", "0.5228844", "0.5225619", "0.5218555", "0.52178776", "0.5216171", "0.5207342", "0.5205921", "0.5203913", "0.520244", "0.520244", "0.520244", "0.520244", "0.520244", "0.52007645", "0.5200255" ]
0.676394
0
Query for a specific subcommand.
def __init__(self, subcommand: Subcommand, query: str):
    self.command = subcommand
    self.query = query
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_subcommand(self, name):\n try:\n subcommand_class = self.subcommands[name]\n except KeyError:\n self.print_command_unkown_error(name)\n sys.exit(1)\n return subcommand_class(self.prog, name, self.argv[2:], self.stdout)", "def fetch_command(self, global_options, subcommand):\r\n commands = self.get_commands(global_options)\r\n try:\r\n klass = commands[subcommand]\r\n except KeyError:\r\n sys.stderr.write(\"Unknown command: %r\\nType '%s help' for usage.\\nMany commands will only run at project directory, maybe the directory is not right.\\n\" % \\\r\n (subcommand, self.prog_name))\r\n sys.exit(1)\r\n return klass", "def get_subcmd(self, name: str) -> \"CommandHelp\":\n try:\n return self.subcmds[name]\n except KeyError:\n # Try looking up by alias\n for sub_name, sub_help in self.subcmds.items():\n for alias in sub_help.aliases:\n if name == alias:\n return self.subcmds[sub_name]\n raise", "def _find_subcommand(args):\n subcmd = args[1]\n if subcmd in [\n \"cfg\"\n # , 'init',\n ]:\n return subcmd\n else:\n return None", "def subcmd(self) -> Optional[str]:\n return self._subcmd", "def find(sub, arg):\n\n if sub == 'collections':\n res = api.find_collections(arg)\n elif sub == 'domains':\n res = api.find_domains(arg)\n elif sub == 'problems':\n res = api.find_problems(arg)\n else:\n print(\"Error: Unrecognized sub-command, {0}\".format(sub))\n exit(1)\n\n pprint.pprint(res)", "def execute(self):\n\n options, args = self.parser.parse_args(self.argv)\n\n try:\n subcommand_name = self.argv[1]\n except IndexError:\n subcommand_name = 'help'\n\n if subcommand_name == 'help':\n if len(args) <= 2:\n self.print_help()\n else:\n self.fetch_subcommand(self.argv[2]).print_help()\n elif subcommand_name == 'version':\n self.print_version()\n else:\n self.fetch_subcommand(subcommand_name).execute()", "def cli():\n pass # do nothing here, it just defines the name for other subcommands", "def fetch_command(self, subcommand):\n # Get commands outside of try block to prevent swallowing exceptions\n commands = get_commands()\n try:\n app_name = commands[subcommand]\n except KeyError:\n possible_matches = get_close_matches(subcommand, commands)\n sys.stderr.write(\"Unknown command: %r\" % subcommand)\n if possible_matches:\n sys.stderr.write(\". 
Did you mean %s?\" % possible_matches[0])\n sys.stderr.write(\"\\nType '%s help' for usage.\\n\" % self.prog_name)\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass", "def _subcommand_for_name(self, name):\n for subcommand in self.subcommands:\n if name == subcommand.name or \\\n name in subcommand.aliases:\n return subcommand\n return None", "def query_cmdline():", "def get_subcommand_query(query_str: str) -> Optional[SubcommandQuery]:\n if not query_str:\n return None\n\n # spilt:\n # \"subcommand_name rest of query\" -> [\"subcommand_name\", \"rest of query\"\"]\n query_parts = query_str.strip().split(None, maxsplit=1)\n\n if len(query_parts) < 2:\n query_str = \"\"\n else:\n query_str = query_parts[1]\n\n subcommand = get_subcommand_for_name(query_parts[0])\n if subcommand:\n return SubcommandQuery(subcommand=subcommand, query=query_str)", "async def command_proc(self, message):\n parser = DiscordArgumentParser(description=\"A Test Command\", prog=\">stats\")\n parser.set_defaults(message=message)\n sp = parser.add_subparsers()\n\n sub_parser = sp.add_parser('user',\n description='test something')\n sub_parser.add_argument(\n \"user_id\",\n action=ValidUserAction,\n help=\"Mention of the user in question\",\n metavar=\"@user\",\n nargs=\"?\",\n )\n sub_parser.set_defaults(cmd=self._cmd_user)\n\n sub_parser = sp.add_parser('global',\n description='test something')\n sub_parser.set_defaults(cmd=self._cmd_global)\n\n try:\n self.log.info(\"Parse Arguments\")\n results = parser.parse_args(shlex.split(message.content)[1:])\n self.log.info(results)\n if type(results) == str:\n self.log.info(\"Got normal return, printing and returning\")\n self.log.info(type(results))\n await self.client.send_message(message.channel, results)\n return\n elif hasattr(results, 'cmd'):\n await results.cmd(results)\n return\n else:\n msg = parser.format_help()\n await self.client.send_message(message.channel, msg)\n return\n except NoValidCommands as e:\n # We didn't get a subcommand, let someone else deal with this mess!\n self.log.error(\"???\")\n pass\n except HelpNeeded as e:\n self.log.info(\"TypeError Return\")\n self.log.info(e)\n msg = f\"{e}. 
You can add `-h` or `--help` to any command to get help!\"\n await self.client.send_message(message.channel, msg)\n return\n pass\n\n return", "def get_subcommand_for_name(name: str) -> Optional[Subcommand]:\n matching = [s for s in subcommands if s.name.lower() == name.lower()]\n if matching:\n return matching[0]", "def execute(self):\n try:\n subcommand = self.argv[1]\n except IndexError:\n subcommand = \"help\" # Display help if no arguments were given.\n\n # These options could affect the commands that are available, so they\n # must be processed early.\n parser = CommandParser(\n prog=self.prog_name,\n usage=\"%(prog)s subcommand [options] [args]\",\n add_help=False,\n allow_abbrev=False,\n )\n parser.add_argument(\"args\", nargs=\"*\") # catch-all\n try:\n options, args = parser.parse_known_args(self.argv[2:])\n except CommandError:\n pass # Ignore any option errors at this point.\n\n if subcommand == \"help\":\n if \"--commands\" in args:\n sys.stdout.write(self.main_help_text(commands_only=True) + \"\\n\")\n elif not options.args:\n sys.stdout.write(self.main_help_text() + \"\\n\")\n else:\n self.fetch_command(options.args[0]).print_help(\n self.prog_name, options.args[0]\n )\n elif subcommand == \"version\" or self.argv[1:] == [\"--version\"]:\n sys.stdout.write(get_named_version() + \"\\n\")\n elif self.argv[1:] in ([\"--help\"], [\"-h\"]):\n sys.stdout.write(self.main_help_text() + \"\\n\")\n else:\n self.fetch_command(subcommand).run_from_argv(self.argv)", "def test_handle_subcommand_help(self):\r\n subcommands = list(self.testcommand.subparser.choices.keys())\r\n for subcommand in subcommands:\r\n command = f\"project {subcommand} --help\"\r\n ret, code = self.testcommand.handle(command, user)\r\n self.assertEqual(1, ret.count(\"usage\"))\r\n self.assertEqual(code, 200)\r\n\r\n command = f\"project {subcommand} -h\"\r\n ret, code = self.testcommand.handle(command, user)\r\n self.assertEqual(1, ret.count(\"usage\"))\r\n self.assertEqual(code, 200)\r\n\r\n command = f\"project {subcommand} --invalid argument\"\r\n ret, code = self.testcommand.handle(command, user)\r\n self.assertEqual(1, ret.count(\"usage\"))\r\n self.assertEqual(code, 200)", "def get_command(pid):", "def fetch_command(self, subcommand):\n try:\n app_name = get_commands()[subcommand]\n except KeyError:\n sys.stderr.write(\"Unknown command: %r\\nType '%s help'\"\n \" for usage.\\n\" % \\\n (subcommand, self.prog_name))\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass", "def show(sub, arg):\n\n arg = int(arg)\n\n if sub == 'collection':\n res = api.get_collection(arg)\n elif sub == 'domain':\n res = api.get_domain(arg)\n elif sub == 'problem':\n res = api.get_problem(arg)\n elif sub == 'plan':\n res = api.get_plan(arg)\n else:\n print(\"Error: Unrecognized sub-command, {0}\".format(sub))\n exit(1)\n\n pprint.pprint(res)", "def test_handle_subcommand_help(self):\n subcommands = list(self.testcommand.subparser.choices.keys())\n for subcommand in subcommands:\n command = f\"team {subcommand} --help\"\n ret, code = self.testcommand.handle(command, user)\n self.assertEqual(1, ret.count(\"usage\"))\n self.assertEqual(code, 200)\n\n command = f\"team {subcommand} -h\"\n ret, code = self.testcommand.handle(command, user)\n self.assertEqual(1, ret.count(\"usage\"))\n self.assertEqual(code, 200)\n\n command = f\"team {subcommand} --invalid argument\"\n ret, code = 
self.testcommand.handle(command, user)\n self.assertEqual(1, ret.count(\"usage\"))\n self.assertEqual(code, 200)", "def test_get_subcommand_help(self):\n subcommands = list(self.testcommand.subparser.choices.keys())\n for subcommand in subcommands:\n help_message = self.testcommand.get_help(subcommand=subcommand)\n self.assertEqual(1, help_message.count(\"usage\"))", "def run_subcommand(args):\n try:\n command = args.__dict__.pop(\"command\")\n command.execute(**args.__dict__)\n except (CommandLineException, KeyError) as error:\n print(colors.red(error))\n sys.exit(getattr(error, 'exit_code', 1))", "def print_help(self, prog_name, subcommand):\r\n parser = self.create_parser(prog_name, subcommand)\r\n parser.print_help()", "def test_get_subcommand_help(self):\r\n subcommands = list(self.testcommand.subparser.choices.keys())\r\n for subcommand in subcommands:\r\n help_message = self.testcommand.get_help(subcommand=subcommand)\r\n self.assertEqual(1, help_message.count(\"usage\"))", "def add_subcommand(self, cmd):\n self.subcommands.append(cmd)", "def main(ctx):\n\n print(\"Mode:\", ctx.invoked_subcommand)", "def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()", "def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()", "def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()", "def run(query, confirmation):\n\n query = query.lower()\n\n if (query.isnumeric()):\n if (confirmation == None):\n confirmation = False\n\n matches = [commands[int(query) - 1]]\n else:\n if (confirmation == None):\n confirmation = False\n # Search in commands return list of all matches\n # matches = [x for x in commands if (query in x['command'].lower()) or (query in x['description'].lower())]\n matches = [x for x in commands if (query == x['alias'])]\n \n for match in matches:\n if (confirmation):\n answer = click.confirm('Run ' + Fore.MAGENTA + match['command'] + Fore.RESET, default=True)\n\n if (confirmation == False or answer):\n execCommand(match)\n break", "def find_command(cmd):\n if cmd:\n root = '.'.join([COMMANDS_PACKAGE_NAME] + cmd)\n else:\n root = COMMANDS_PACKAGE_NAME\n try:\n return _get_commands(root)['__module__'].COMMAND\n except KeyError:\n LOGGER.debug('%r not recognized as a TAU command', cmd)\n resolved = _resolve(cmd, cmd, _COMMANDS[SCRIPT_COMMAND])\n LOGGER.debug('Resolved ambiguous command %r to %r', cmd, resolved)\n return find_command(resolved)\n except AttributeError as err:\n raise InternalError(\"'COMMAND' undefined in %r\" % cmd) from err", "def work(self):\n\n cmd = self.options.command\n cmdargs = self.options.args\n\n # find function\n fname = \"cmd_\" + cmd.replace('-', '_')\n if not hasattr(self, fname):\n self.log.error('bad subcommand, see --help for usage')\n sys.exit(1)\n fn = getattr(self, fname)\n\n b = inspect.signature(fn).bind(*cmdargs)\n\n fn(*b.args, **b.kwargs)", "def cmd(self, cmd):\n return cmd", "def get_command(self, object_name, user_key = None):\n\t\treturn self.get_object('command',object_name, user_key = user_key)", "def do_help(self, args): \n if args.command:\n if len(args.command) > 1:\n command = args.command[0] +\"-\" + args.command[1]\n else:\n command = args.command[0]\n if command in self.subcommands:\n self.subcommands[command].print_help()\n else:\n print((\"'%s' is not a valid subcommand\") %\n args.command)\n else:\n self.parser.print_help()", "def 
commands_for_submode(prefix):\n candidates = _lookup_command_candidates(prefix, command_registry)\n # print type(candidates), [x['self'] for x in candidates]\n # print [x.get('all-help') for x in candidates]\n if debug.cli():\n # print 'commands_for_submode:', sorted(dict([[x['name']['title'] if type(x['name']) == dict else x['name'], None] for x in candidates]).keys())\n pass\n return candidates", "def subcommand(wrapped):\n def callback(scanner, name, ob):\n scanner.subcommands[ob.name] = ob\n venusian.attach(wrapped, callback, category='subcommands')\n return wrapped", "def extend_cli(self, subparser):", "def subcommands(self) -> list[\"ProjectCommand\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"subcommands\", _args)\n _ctx = ProjectCommand(_ctx)._select_multiple(\n _description=\"description\",\n _name=\"name\",\n _result_type=\"resultType\",\n )\n return _ctx.execute_sync(list[ProjectCommand])", "def do_help(self, args):\n if getattr(args, 'command', None):\n if args.command in self.subcommands:\n self.subcommands[args.command].print_help()\n else:\n raise CommandError(\"'%s' is not a valid subcommand\" %\n args.command)\n else:\n self.parser.print_help()", "def do_help(self, args):\n if getattr(args, 'command', None):\n if args.command in self.subcommands:\n self.subcommands[args.command].print_help()\n else:\n raise exc.CommandError(\"'%s' is not a valid subcommand\" %\n args.command)\n else:\n self.parser.print_help()", "def do_help(self, args):\n if getattr(args, 'command', None):\n if args.command in self.subcommands:\n self.subcommands[args.command].print_help()\n else:\n raise exc.CommandError(\"'%s' is not a valid subcommand\" %\n args.command)\n else:\n self.parser.print_help()", "def query(self, command: str) -> str:\n return self._dmm.query(command).rstrip()", "def cmd(self):", "async def custom(self, ctx):\n if ctx.invoked_subcommand is None:\n raise commands.CommandNotFound(\"Subcommand '{}' does not exist.\".format(ctx.subcommand_passed))", "def do_help(self, args):\n if getattr(args, 'command', None):\n if args.command in self.subcommands:\n self.subcommands[args.command].print_help()\n else:\n raise Exception(\"'%s' is not a valid subcommand\" %\n args.command)\n else:\n self.parser.print_help()", "def test_get_invalid_subcommand_help(self):\r\n self.assertEqual(self.testcommand.get_help(),\r\n self.testcommand.get_help(subcommand=\"foo\"))", "def known_command(self, command):\n return self._known_command(command, self.do_command)", "def get_cmd(self, command):\n return self.commands[command][\"cmd\"]", "def run(self):\n if self.subcommand_parser is None:\n self.exit(1, 'Command defines no subcommands')\n\n args = self.parse_args()\n if args.command is None:\n self.exit(1, 'No command selected')", "def get_command(self, ctx, cmd_name):\n cmd_name = self.MAP.get(cmd_name, cmd_name)\n return click.Group.get_command(self, ctx, cmd_name)", "def test_get_invalid_subcommand_help(self):\n self.assertEqual(self.testcommand.get_help(),\n self.testcommand.get_help(subcommand=\"foo\"))", "def _command(self, *cmd, handler=None):", "def executeCommand(cmd, cmd_args=[]):\n def _resolve(c, d):\n if not c: \n return []\n car, cdr = c[0], c[1:]\n try:\n matches = [(car, d[car])]\n except KeyError:\n matches = [i for i in d.iteritems() if i[0].startswith(car)]\n if len(matches) == 1:\n return [matches[0][0]] + _resolve(cdr, matches[0][1])\n elif len(matches) == 0:\n raise UnknownCommandError(' '.join(cmd))\n elif len(matches) > 1:\n raise AmbiguousCommandError(' '.join(cmd), 
[m[0] for m in matches])\n\n while len(cmd):\n root = '.'.join([__name__] + cmd)\n try:\n main = getCommands(root)['__module__'].main\n except KeyError:\n LOGGER.debug('%r not recognized as a TAU command' % cmd)\n try:\n resolved = _resolve(cmd, _commands[__name__])\n except UnknownCommandError:\n if len(cmd) <= 1: \n raise # We finally give up\n parent = cmd[:-1]\n LOGGER.debug('Getting help from parent command %r' % parent)\n return executeCommand(parent, ['--help'])\n else:\n LOGGER.debug('Resolved ambiguous command %r to %r' % (cmd, resolved))\n return executeCommand(resolved, cmd_args)\n except AttributeError:\n raise InternalError(\"'main(argv)' undefined in command %r\" % cmd)\n else:\n return main(cmd_args)", "def usage(self, subcommand):\n usage = '%%prog %s [options] %s' % (subcommand, self.args)\n if self.help:\n return '%s\\n\\n%s' % (usage, self.help)\n else:\n return usage", "def get_command(self, ctx, cmd_name):\n path = \"%s.%s\" % (__name__, cmd_name)\n path = path.replace(\"-\", \"_\")\n try:\n module = importlib.import_module(path)\n return getattr(module, 'cli')\n except ModuleNotFoundError as ex:\n print(ex.name)\n return None", "def main(ctx: typer.Context):\n LOG.debug(F\"COVIDAP: executing command: {ctx.invoked_subcommand}\")", "def _query_commands(self):\n # TODO: make this work\n self.player.respond(\"Hi there! Ask me to play artists or songs. \"\n \"I can also find songs that are similar to other \"\n \"artists.\")", "def subcmd_help(word, word_eol):\n\tif len(word) > 1:\n\t\ttopic = word[1]\n\t\tif topic in subcommands:\n\t\t\tdoprint('help', subcommands[topic].__doc__)\n\t\telse:\n\t\t\tdoprint('help', 'Unknown subcommand \"%s\". Try \"/mt_irc help\".' % topic)\n\telse:\n\t\tfor subcmd in subcommands:\n\t\t\tdoprint('help', subcommands[subcmd].__doc__)", "def DispatchCommand(command, options, args, command_map=None):\n if command_map is None:\n command_map = gclient_command_map\n\n if command in command_map:\n return command_map[command](options, args)\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n command)", "def execute(self):\n # Preprocess options to extract --settings and --pythonpath.\n # These options could affect the commands that are available, so they\n # must be processed early.\n parser = LaxOptionParser(usage=\"%prog subcommand [options] [args]\",\n version=magpy.get_version(),\n option_list=BaseCommand.option_list)\n self.autocomplete()\n try:\n options, args = parser.parse_args(self.argv)\n handle_default_options(options)\n except:\n # Ignore any option errors at this point.\n pass # pylint: disable-msg=W0702\n\n try:\n subcommand = self.argv[1]\n except IndexError:\n subcommand = 'help' # Display help if no arguments were given.\n\n if subcommand == 'help':\n if len(args) <= 2:\n parser.print_lax_help()\n sys.stdout.write(self.main_help_text() + '\\n')\n elif args[2] == '--commands':\n sys.stdout.write(\n self.main_help_text(commands_only=True) + '\\n')\n else:\n self.fetch_command(args[2]).print_help(self.prog_name, args[2])\n elif subcommand == 'version':\n sys.stdout.write(parser.get_version() + '\\n')\n # Special-cases: We want 'django-admin.py --version' and\n # 'django-admin.py --help' to work, for backwards compatibility.\n elif self.argv[1:] == ['--version']:\n # LaxOptionParser already takes care of printing the version.\n pass\n elif self.argv[1:] in (['--help'], ['-h']):\n parser.print_lax_help()\n sys.stdout.write(self.main_help_text() + '\\n')\n else:\n 
self.fetch_command(subcommand).run_from_argv(self.argv)", "def unknown_command(s, subcommand, parser):\n return SlackResponseText(\"unknown subcommand '{}'; try 'help'\".format(subcommand))", "def process_cmd(config, cmd):\n # Separate command from arguments\n cmd_parts = cmd.split(' ', 1)\n head = cmd_parts[0]\n args = ''\n if len(cmd_parts) == 2:\n args = cmd_parts[1]\n\n # Call the command\n if not common.call_cmd(head, config, args):\n print(\"RabbitHole: Unknown command '{}'\".format(head))", "def __getitem__(self, cmd):\n assert isinstance(cmd, str)\n name = cmd[len(self.predicate) :]\n # Check that command is valid and not private,\n # protected or special method and attribute for it exists\n if (\n cmd.startswith(self.predicate)\n and not cmd.startswith(self.predicate + \"_\")\n and hasattr(self, name)\n ):\n item = self.__getattribute__(name)\n if callable(item):\n return item\n # If command not found, return help\n return partial(self.help, fail=\"No such command\")", "def _extract_command(self, args):\n opts = self.gopts[:]\n for cmd in self.ctable.values():\n opts.extend(cmd.opts)\n sfl, lfl, _ = self._compute_flags(opts, check_conflicts=False)\n\n lopts,largs = getopt.getopt(args, sfl, lfl)\n if not largs:\n return None\n return self._command(largs[0])", "def run_command(self, commandString):\n commandWords = commandString.split(\" \")\n commandWords = filter(len, map(lambda x: x.strip(), commandWords))\n command = commandWords[0]\n parameters = commandWords[1:]\n if command in self.commands.keys():\n return self.commands[command](*parameters)\n else:\n if command in self.aliases.keys():\n (aliasCommand, aliasParameters) = self.aliases[command]\n parameters = list(aliasParameters) + parameters\n if aliasCommand in self.commands.keys():\n return self.commands[aliasCommand](*parameters)\n else:\n cprint(\"Command not found: %s (%s)\" % (command, commandString), 'red')", "def usage(self, subcommand):\r\n if len(self.option_list) > 0:\r\n usage = '%%prog %s [options] %s' % (subcommand, self.args)\r\n else:\r\n usage = '%%prog %s %s' % (subcommand, self.args)\r\n if self.help:\r\n return '%s\\n\\n%s' % (usage, self.help)\r\n else:\r\n return usage", "async def admin(ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(\"Invalid Command\")", "def command():\n pass", "def get_command(self, command_name: str):\n self._bot.all_commands.get(command_name, None)", "def get_command(self, cmd_str):\n try:\n return self.commands[cmd_str]\n except KeyError:\n raise ServerException('invalid command')", "def test_handle_multiple_subcommands(self):\r\n ret, code = self.testcommand.handle(\"project list edit\", user)\r\n self.assertEqual(ret, self.testcommand.get_help())\r\n self.assertEqual(code, 200)", "def get_command_with_name(self, command_name):\n return self.commands[command_name]", "def help(self, irc, msg, args, command):\n command = map(callbacks.canonicalName, command)\n (maxL, cbs) = irc.findCallbacksForArgs(command)\n if maxL == command:\n if len(cbs) > 1:\n names = sorted([cb.name() for cb in cbs])\n irc.error(format('That command exists in the %L plugins. 
'\n 'Please specify exactly which plugin command '\n 'you want help with.', names))\n else:\n assert cbs, 'Odd, maxL == command, but no cbs.'\n irc.reply(cbs[0].getCommandHelp(command, False))\n else:\n irc.error(format('There is no command %q.',\n callbacks.formatCommand(command)))", "def command(self, command, value=1, callback=None,\n check=True, allowable_errors=[], **kwargs):\n\n if isinstance(command, basestring):\n command = SON([(command, value)])\n\n command.update(kwargs)\n\n self.connection(\"$cmd\").find_one(command,callback=callback,\n _must_use_master=True,\n _is_command=True)", "def query(self, cmd, raw=False):\n url = self.__baseurl.format(cmd)\n req = self.session.get(url)\n if not req.ok:\n req.raise_for_status()\n\n return req.text if raw else to_dict(req.text)", "def _subexec(command):\n lcwd = fabric.state.env.get('lcwd', None) or None #sets lcwd to None if it bools to false as well\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=lcwd)\n out, err = process.communicate()\n print \"command : %s \" % command\n print \"out: %s\" % out\n print \"err: %s\" % err", "def _invoke_cmd(self, cmd):\n if cmd in self.COMMANDS:\n self.COMMANDS[cmd]()\n else:\n print(ERROR_UNKNOWN_COMMAND.format(cmd=cmd))", "def get_command(self,command):\n\t\treturn self.command_handlers[command]", "def test_handle_multiple_subcommands(self):\n ret, code = self.testcommand.handle(\"team list edit\", user)\n self.assertEqual(ret, self.testcommand.get_help())\n self.assertEqual(code, 200)", "def market(ctx):\n if ctx.invoked_subcommand is None:\n logger.info(ctx.command.get_help(ctx))", "def pyscript_subcommand():", "def _get_command(self, command_name):\n try:\n return self._commands[command_name]\n except KeyError:\n raise UnsupportedCommand(\n \"Command: {} not supported\".format(command_name)\n )", "def handle_admincommands(bot, ievent):\n cmnds = getcmndtable()\n if not ievent.rest: ievent.reply(\"commands: \", cmnds)\n else:\n try: ievent.reply(\"%s command is found in %s \" % (ievent.rest, cmnds[ievent.rest]))\n except KeyError: ievent.reply(\"no such commands available\")", "def get_command(command):\n for _cmd in commands:\n if _cmd.command == command:\n return _cmd\n raise UserWarning(\"telegram command not found.\")", "def setup_subcommands(argparser):\n\n subparsers = argparser.add_subparsers()\n\n parser_info = subparsers.add_parser('info', help = 'Provide the information about the user')\n parser_info.add_argument('user', help = 'The user to inspect')\n\n parser_ownerships = subparsers.add_parser('ownerships', help = 'Show items which this user owns')\n parser_ownerships.add_argument('user', help = 'The name of the user to show information about')\n parser_ownerships.add_argument('-r', '--recursive', action = 'store_true', help = 'Show items which this user own through being in lists')\n \n parser_info.set_defaults(handler = show_info)\n parser_ownerships.set_defaults(handler = show_ownerships)", "def test_no_query_passed(self, api_client):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.helper.sys\") as sys:\n sys.stdin.isatty.return_value = True\n result = runner.invoke(\n subcommand.query, parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise query\" in result.output\n api_client.query.assert_not_called()", "def _run_command(self, opts, args):\r\n cmd = self.search_commands(args[0])\r\n\r\n if opts.debug:\r\n LOGGER.setLevel(logging.DEBUG)\r\n 
LERR.setLevel(logging.DEBUG)\r\n\r\n if not (opts.nologo or cmd.nologo) and not self.interactive:\r\n sys.stdout.write(FIPSSTR)\r\n CLI.version(self._progname, versioning.__version__,\\\r\n versioning.__extracontent__, fileh=sys.stdout)\r\n if len(args) > 1:\r\n return cmd.run(args[1:])\r\n\r\n return cmd.run([])", "def get_command(self, ctx: Context, cmd_name: str) -> Command:\n next_command = self.command.pop(0)\n if not self.command:\n return DocsBaseCommand(\n name=next_command,\n short_help=f\"Documentation for {self.command_string}\",\n callback=self.command_callback,\n )\n return DocsSubCommand(command=self.command)", "def find_subcommands():\n clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n subclasses = [cls for cls in clsmembers if issubclass(cls[1], (Subcommand, LocalSubcommand))\n and cls[1] not in [Subcommand, LocalSubcommand]]\n\n subcommands = []\n for subclass in subclasses:\n name = '-'.join([part.lower() for part in re.findall('[A-Z][a-z]*', subclass[0])])\n subcommands.append((name, subclass[1]))\n return subcommands", "def find_command(self, command: str):\n layers = list(self.command_layers)\n while layers and not layers[0].active:\n del layers[0]\n\n if not layers:\n return\n\n last = None\n for layer in layers:\n last = layer\n if match := layer.find_command(command):\n return match\n\n if last:\n return last.cannot_find(command)", "async def _search(self, ctx):\n if ctx.invoked_subcommand is None:\n text = open('texts/search.md').read()\n em = discord.Embed(title='Commandes de search TuxBot', description=text, colour=0x89C4F9)\n await self.bot.say(embed=em)", "def dispatch_command(self, args):\n\t\targuments = {k: v for k, v in vars(args).items() if v is not None}\n\t\tfor c in self.COMMANDS.keys():\n\t\t\tcmd = arguments.get(c, False)\n\t\t\tidx = c\n\t\t\tif cmd:\n\t\t\t\tbreak\n\t\telse:\n\t\t\treturn None\n\n\t\tif cmd not in self.COMMANDS[idx]:\n\t\t\traise CommandNotFoundError(\"{cmd} not registered\".format(cmd=cmd))\n\n\t\treturn getattr(self, self.COMMANDS[idx][cmd])(arguments)", "def shell(self, cmd):\n raise NotImplementedError", "def command():\n return _config.command", "def validate_subcommand(\n commands: Sequence[str], workflows: Sequence[str], subcommand: str\n) -> None:\n if not commands and not workflows:\n msg.fail(f\"No commands or workflows defined in {PROJECT_FILE}\", exits=1)\n if subcommand not in commands and subcommand not in workflows:\n help_msg = []\n if subcommand in [\"assets\", \"asset\"]:\n help_msg.append(\"Did you mean to run: python -m spacy project assets?\")\n if commands:\n help_msg.append(f\"Available commands: {', '.join(commands)}\")\n if workflows:\n help_msg.append(f\"Available workflows: {', '.join(workflows)}\")\n msg.fail(\n f\"Can't find command or workflow '{subcommand}' in {PROJECT_FILE}\",\n \". 
\".join(help_msg),\n exits=1,\n )", "def _get_command_lookup(self, command_dict):", "def do_command(self, args = ()):\n if len(args) == 0:\n self.do_overview()\n elif len(args) != 1:\n raise ValueError('Wrong number of arguments.')\n elif args[0] in self.base.commands.keys():\n self.do_command_help(args[0])\n else:\n raise ValueError('No such command.')", "def command(self):\n raise NotImplementedError", "def cog_subcommand(\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n base_default_permission: bool = True,\n base_permissions: typing.Dict[int, list] = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None,\n connector: dict = None,\n):\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n guild_ids = guild_ids if guild_ids else []\n if not base_permissions:\n base_permissions = {}\n\n def wrapper(cmd):\n decorator_permissions = getattr(cmd, \"__permissions__\", None)\n if decorator_permissions:\n base_permissions.update(decorator_permissions)\n\n desc = description or inspect.getdoc(cmd)\n if options is None:\n opts = manage_commands.generate_options(cmd, desc, connector)\n else:\n opts = options\n\n if guild_ids and not all(isinstance(item, int) for item in guild_ids):\n raise IncorrectGuildIDType(\n f\"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. Furthermore, the command '{name or cmd.__name__}' will be deactivated and broken until fixed.\"\n )\n\n _cmd = {\n \"func\": None,\n \"description\": base_description,\n \"guild_ids\": guild_ids.copy(),\n \"api_options\": [],\n \"default_permission\": base_default_permission,\n \"api_permissions\": base_permissions,\n \"connector\": {},\n \"has_subcommands\": True,\n }\n\n _sub = {\n \"func\": cmd,\n \"name\": name or cmd.__name__,\n \"description\": desc,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"guild_ids\": guild_ids,\n \"api_options\": opts,\n \"connector\": connector,\n }\n return CogSubcommandObject(base, _cmd, subcommand_group, name or cmd.__name__, _sub)\n\n return wrapper" ]
[ "0.6903667", "0.68024147", "0.66922116", "0.667356", "0.659423", "0.65638745", "0.63775086", "0.6367834", "0.636292", "0.6309504", "0.623588", "0.62239105", "0.6186224", "0.6177531", "0.61727655", "0.6118774", "0.6098485", "0.6085753", "0.6055098", "0.6037985", "0.6026761", "0.6023961", "0.6011031", "0.6010276", "0.59842", "0.59686923", "0.5933246", "0.5933246", "0.5933246", "0.59292024", "0.59207064", "0.58992887", "0.58986866", "0.5891913", "0.58749664", "0.58723575", "0.58535534", "0.5839353", "0.5838179", "0.5830005", "0.58277273", "0.58277273", "0.5819866", "0.5805033", "0.5799585", "0.5791071", "0.57875335", "0.57591647", "0.5758976", "0.5754734", "0.57457966", "0.57382536", "0.57201785", "0.5718928", "0.57182723", "0.56814915", "0.56803006", "0.5668577", "0.56629914", "0.5645135", "0.56305766", "0.5621844", "0.5621241", "0.56130785", "0.55827135", "0.55740535", "0.556574", "0.5545619", "0.55427444", "0.5533687", "0.5532279", "0.5528207", "0.5509896", "0.5504221", "0.5493051", "0.54877466", "0.5486723", "0.5485975", "0.5485119", "0.5484347", "0.5484184", "0.54834396", "0.547936", "0.5477383", "0.54741246", "0.5466151", "0.5465777", "0.54652137", "0.5464271", "0.5459881", "0.5454196", "0.5444105", "0.544368", "0.5437969", "0.5430727", "0.5427737", "0.5423046", "0.5422013", "0.5419542", "0.5414441" ]
0.6384904
6
Get a subcommand with the indicated name.
def get_subcommand_for_name(name: str) -> Optional[Subcommand]:
    matching = [s for s in subcommands if s.name.lower() == name.lower()]
    if matching:
        return matching[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_subcommand(self, name):\n try:\n subcommand_class = self.subcommands[name]\n except KeyError:\n self.print_command_unkown_error(name)\n sys.exit(1)\n return subcommand_class(self.prog, name, self.argv[2:], self.stdout)", "def get_subcmd(self, name: str) -> \"CommandHelp\":\n try:\n return self.subcmds[name]\n except KeyError:\n # Try looking up by alias\n for sub_name, sub_help in self.subcmds.items():\n for alias in sub_help.aliases:\n if name == alias:\n return self.subcmds[sub_name]\n raise", "def _subcommand_for_name(self, name):\n for subcommand in self.subcommands:\n if name == subcommand.name or \\\n name in subcommand.aliases:\n return subcommand\n return None", "def getCommand(self, name):\n return self.commands[name]()", "def get_command(self, ctx, cmd_name):\n cmd_name = self.MAP.get(cmd_name, cmd_name)\n return click.Group.get_command(self, ctx, cmd_name)", "def get_command_with_name(self, command_name):\n return self.commands[command_name]", "def get_command(self, ctx, name):\n commands = self._iter_commands()\n return commands[name].load()", "def get_command(self, ctx, name):\n try:\n if sys.version_info[0] == 2:\n name = name.encode('ascii', 'replace')\n mod = __import__('cli.commands.cmd_' + name, None, None, ['cli'])\n except ImportError:\n exit(1)\n\n return mod.cli", "def get_command(self, ctx, cmd_name):\n path = \"%s.%s\" % (__name__, cmd_name)\n path = path.replace(\"-\", \"_\")\n try:\n module = importlib.import_module(path)\n return getattr(module, 'cli')\n except ModuleNotFoundError as ex:\n print(ex.name)\n return None", "def _find_subcommand(args):\n subcmd = args[1]\n if subcmd in [\n \"cfg\"\n # , 'init',\n ]:\n return subcmd\n else:\n return None", "def get_command(self, ctx: Context, cmd_name: str) -> Command:\n next_command = self.command.pop(0)\n if not self.command:\n return DocsBaseCommand(\n name=next_command,\n short_help=f\"Documentation for {self.command_string}\",\n callback=self.command_callback,\n )\n return DocsSubCommand(command=self.command)", "def __getattr__(self, name):\n return Command(self.cmd, name)", "def _get_command(self, command_name):\n try:\n return self._commands[command_name]\n except KeyError:\n raise UnsupportedCommand(\n \"Command: {} not supported\".format(command_name)\n )", "def fetch_command(self, global_options, subcommand):\r\n commands = self.get_commands(global_options)\r\n try:\r\n klass = commands[subcommand]\r\n except KeyError:\r\n sys.stderr.write(\"Unknown command: %r\\nType '%s help' for usage.\\nMany commands will only run at project directory, maybe the directory is not right.\\n\" % \\\r\n (subcommand, self.prog_name))\r\n sys.exit(1)\r\n return klass", "def fetch_command(self, subcommand):\n # Get commands outside of try block to prevent swallowing exceptions\n commands = get_commands()\n try:\n app_name = commands[subcommand]\n except KeyError:\n possible_matches = get_close_matches(subcommand, commands)\n sys.stderr.write(\"Unknown command: %r\" % subcommand)\n if possible_matches:\n sys.stderr.write(\". 
Did you mean %s?\" % possible_matches[0])\n sys.stderr.write(\"\\nType '%s help' for usage.\\n\" % self.prog_name)\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass", "def get_command(self, command_name):\n valid_commands = []\n for existing_command in self._blotish_commands.keys():\n if existing_command.startswith(command_name):\n valid_commands.append(existing_command)\n if len(valid_commands) != 1:\n raise blotish.BlotishError, \"No such command '\" + command_name + \"'\"\n return self._blotish_commands[valid_commands[0]]", "def get_command(self, command_name: str):\n self._bot.all_commands.get(command_name, None)", "def get_command(self, object_name, user_key = None):\n\t\treturn self.get_object('command',object_name, user_key = user_key)", "def subcmd(self) -> Optional[str]:\n return self._subcmd", "def fetch_command(self, subcommand):\n try:\n app_name = get_commands()[subcommand]\n except KeyError:\n sys.stderr.write(\"Unknown command: %r\\nType '%s help'\"\n \" for usage.\\n\" % \\\n (subcommand, self.prog_name))\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass", "def load_parent_command(name):\n app_name = get_parent_commands()[name]\n module = import_module('%s.management.commands.%s' % (app_name, name))\n return module.Command", "def get(self, command_name):\n if command_name not in self._commands:\n raise CommandNotFound(\"Command {} not found\".format(command_name))\n return self._commands[command_name]", "def get_custom_command(channel: str, name: str) -> Optional[CustomCommand]:\n assert isinstance(name, str), 'name must be of type str'\n session = get_database_session()\n return session.query(CustomCommand).filter(CustomCommand.channel == channel,\n CustomCommand.name == name).one_or_none()", "def sub_command(self, name: str=None, description: str=None, options: list=None, connectors: dict=None, **kwargs):\r\n def decorator(func):\r\n if self.child_type is None:\r\n if len(self.registerable.options) > 0:\r\n self.registerable.options = []\r\n self.child_type = Type.SUB_COMMAND\r\n \r\n new_func = SubCommand(\r\n func,\r\n name=name,\r\n description=description,\r\n options=options,\r\n connectors=connectors,\r\n **kwargs\r\n )\r\n self.children[new_func.name] = new_func\r\n self.registerable.options.append(new_func.option)\r\n return new_func\r\n return decorator", "def get_cmd(self, command):\n return self.commands[command][\"cmd\"]", "def cli():\n pass # do nothing here, it just defines the name for other subcommands", "def get_command(self, module_name, command_name):\r\n actions = self.plugins.get(module_name) or {}\r\n if command_name in actions:\r\n return actions[command_name]\r\n if None in actions:\r\n return actions[None]\r\n raise InvalidCommand(module_name, command_name)", "def get_unique_command(self, command_name):\n valid_commands = []\n for existing_command in self._blotish_commands.keys():\n if existing_command.startswith(command_name):\n valid_commands.append(existing_command)\n if not valid_commands:\n # If no command was found, maybe the command name is a variable name\n func = blotish._find_variable_command(command_name)\n if func is not None:\n return func\n else:\n raise blotish.BlotishError(\"No such command '%s'\" % command_name)\n if 
len(valid_commands) > 1:\n raise blotish.BlotishError(\n \"Command not found. Possible commands: %s\" % str(\", \").join(valid_commands))\n return self._blotish_commands[valid_commands[0]]", "def get_user_command_by_name(self, uid, command_name):\n uc_data = self.list_user_commands(uid)\n for uc in uc_data:\n if uc['name'] == command_name:\n return ZenossUserCommand(\n self.api_url,\n self.api_headers,\n self.ssl_verify,\n uc,\n parent=self._check_uid(uid)\n )\n\n return None", "def get_command(self, context, name):\n\t\tif name not in self.commands:\n\t\t\tclue = lnk.errors.Message('Did you mess up the default settings?',\n\t\t\t\t\t\t\t\t\t level=0)\n\t\t\ttry_message = lnk.errors.Message(\"See what 'lnk config -k service'\"\n\t\t\t\t\t\t\t\t\t \t\t \" says.\", level=1)\n\t\t\traise lnk.errors.UsageError('Invalid default service.',\n\t\t\t\t\t\t\t\t\t\tClue=clue,\n\t\t\t\t\t\t\t\t\t\tTry=try_message)\n\t\treturn self.commands[name]", "def get_command(pid):", "def sub_command(self, name: str=None, description: str=None, options: list=None, connectors: dict=None, **kwargs):\r\n def decorator(func):\r\n new_func = SubCommand(\r\n func,\r\n name=name,\r\n description=description,\r\n options=options,\r\n connectors=connectors,\r\n **kwargs\r\n )\r\n self.children[new_func.name] = new_func\r\n self.option.options.append(new_func.option)\r\n return new_func\r\n return decorator", "def add_subcommand(\n subp: _SubParsersAction, name: str, description: Optional[str] = None, **kwargs\n ) -> \"CommandParser\":\n desc_help = {\"description\": description, \"help\": description}\n return subp.add_parser(name, **desc_help, **kwargs)", "def nested_subcmd(self, depth: int = 2) -> Optional[str]:\n # pylint: disable=protected-access\n current = 0\n subparser = self.parser\n try:\n while current < depth:\n action = subparser._actions[0]\n if isinstance(action, _SubParsersAction):\n subparser = action.choices[self.args[action.dest]]\n current += 1\n else:\n return None\n return subparser.name.split()[-1]\n except (IndexError, KeyError, TypeError):\n return None", "def get_sub_by_name(name, submasters=None):\n if not submasters:\n submasters = get_global_submasters()\n\n if name in submasters.get_all_sub_names():\n return submasters.get_sub_by_name(name)\n\n try:\n val = int(name)\n s = Submaster(\"#%d\" % val, leveldict={val : 1.0}, temporary=True)\n return s\n except ValueError:\n pass\n\n try:\n subnum = Patch.get_dmx_channel(name)\n s = Submaster(\"'%s'\" % name, leveldict={subnum : 1.0}, temporary=True)\n return s\n except ValueError:\n pass\n\n # make an error sub\n return Submaster('%s' % name)", "def _load_command(self, manager, command_name):\n try:\n # find_command expects the value of argv so split to emulate that\n return manager.find_command(command_name.split())[0]\n except ValueError:\n raise self.error('\"{}\" is not a valid command in the \"{}\" '\n 'namespace'.format(\n command_name, manager.namespace))", "def get_command(self):\n return self.command", "def getCommand(cmds, chan):\n try:\n return cmds[chan]\n except:\n raise Exception(\"Allowed channels are %s.\" % sorted(cmds.keys()))", "def find_command(cmd):\n if cmd:\n root = '.'.join([COMMANDS_PACKAGE_NAME] + cmd)\n else:\n root = COMMANDS_PACKAGE_NAME\n try:\n return _get_commands(root)['__module__'].COMMAND\n except KeyError:\n LOGGER.debug('%r not recognized as a TAU command', cmd)\n resolved = _resolve(cmd, cmd, _COMMANDS[SCRIPT_COMMAND])\n LOGGER.debug('Resolved ambiguous command %r to %r', cmd, resolved)\n return 
find_command(resolved)\n except AttributeError as err:\n raise InternalError(\"'COMMAND' undefined in %r\" % cmd) from err", "def get_subcommand_query(query_str: str) -> Optional[SubcommandQuery]:\n if not query_str:\n return None\n\n # spilt:\n # \"subcommand_name rest of query\" -> [\"subcommand_name\", \"rest of query\"\"]\n query_parts = query_str.strip().split(None, maxsplit=1)\n\n if len(query_parts) < 2:\n query_str = \"\"\n else:\n query_str = query_parts[1]\n\n subcommand = get_subcommand_for_name(query_parts[0])\n if subcommand:\n return SubcommandQuery(subcommand=subcommand, query=query_str)", "def subcommand(wrapped):\n def callback(scanner, name, ob):\n scanner.subcommands[ob.name] = ob\n venusian.attach(wrapped, callback, category='subcommands')\n return wrapped", "def __getattr__(self, name):\n self._child = _RemoteCommand(\n \"%s.%s\" % (self._name, name), self._parent, self._url\n )\n return self._child", "def get_command(self, ctx, name):\n if name not in self.daemon.list_actions():\n return None\n\n action = self.daemon.get_action(name)\n\n @wraps(action)\n def command(*args, **kwargs):\n return action(*args, **kwargs)\n\n if name in {'start', 'stop', 'restart'}:\n if name in {'start', 'restart'}:\n command = click.option(\n '--debug', is_flag=True,\n help='Do NOT detach and run in the background.',\n )(command)\n if name in {'stop', 'restart'}:\n command = click.option(\n '--force', is_flag=True,\n help='Kill the daemon forcefully after the timeout.',\n )(command)\n command = click.option(\n '--timeout', type=int, default=None,\n help=('Number of seconds to wait for the daemon to stop. '\n 'Overrides \"stop_timeout\" from daemon definition.'),\n )(command)\n if isinstance(self.daemon, MultiDaemon):\n command = click.option(\n '--worker-id', type=int, default=None,\n help='The ID of the worker to {}.'.format(name),\n )(command)\n elif name == 'status':\n command = click.option(\n '--fields', type=str, default=None,\n help='Comma-separated list of process info fields to display.',\n )(command)\n command = click.option(\n '--json', is_flag=True,\n help='Show the status in JSON format.',\n )(command)\n if isinstance(self.daemon, MultiDaemon):\n command = click.option(\n '--worker-id', type=int, default=None,\n help='The ID of the worker whose status to get.',\n )(command)\n else:\n # This is a custom action so try to parse the CLI options\n # by inspecting the function\n for option_args, option_kwargs in _parse_cli_options(action):\n command = click.option(\n *option_args, **option_kwargs)(command)\n\n # Make it into a click command\n command = click.command(name)(command)\n\n return command", "def get_command(self, cmd_str):\n try:\n return self.commands[cmd_str]\n except KeyError:\n raise ServerException('invalid command')", "def subcommand(self, base_name, name, description=MISSING, options=MISSING, guild_ids=MISSING, default_permission=True, guild_permissions=MISSING):\n def wrapper(callback):\n \"\"\"The wrapper for the callback function. 
The function's parameters have to have the same name as the parameters specified in the slash command.\n\n `ctx` is of type :class:`~SlashedCommand` and is used for responding to the interaction and more\n\n Examples\n --------\n - no parameter:\n `async def command(ctx): ...`\n - required parameter \"number\":\n `async def command(ctx, number): ...`\n - optional parameter \"user\":\n `async def command(ctx, user=default_value)`\n - multiple optional parameters \"user\", \"number\":\n `async def command(ctx, user=default_value, number=default_value)`\n - one required and one optional parameter \"user\", \"text\":\n `async def command(ctx, user, text=default_value)`\n\n Note: Replace `default_value` with a value you want to be used if the parameter is not specified in discord, if you don't want a default value, just set it to `None`\n \"\"\"\n if self.subcommands.get(base_name) is None:\n self.subcommands[base_name] = {}\n\n self.subcommands[base_name][name] = SubSlashCommand(callback, base_name, name, description, options=options, guild_ids=guild_ids, default_permission=default_permission, guild_permissions=guild_permissions)\n return wrapper", "def cog_subcommand(\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None,\n connector: dict = None\n):\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n\n def wrapper(cmd):\n desc = description or inspect.getdoc(cmd)\n if options is None:\n opts = manage_commands.generate_options(cmd, desc, connector)\n else:\n opts = options\n\n _sub = {\n \"func\": cmd,\n \"name\": name or cmd.__name__,\n \"description\": desc,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"guild_ids\": guild_ids,\n \"api_options\": opts,\n \"connector\": connector,\n }\n return CogSubcommandObject(_sub, base, name or cmd.__name__, subcommand_group)\n\n return wrapper", "def get_command(self):\n if self.command is not None:\n return self.command\n elif self.parent is not None:\n return self.parent.get_command()\n else:\n return None", "def get_command(self,command):\n\t\treturn self.command_handlers[command]", "def _subcommand_name(ignore=()):\n NON_ANIM_UTILS = [\"cfg\", \"--help\", \"-h\"]\n NON_ANIM_UTILS = [util for util in NON_ANIM_UTILS if util not in ignore]\n\n # If a subcommand is found, break out of the inner loop, and hit the break of the outer loop\n # on the way out, effectively breaking out of both loops. The value of arg will be the\n # subcommand to be taken.\n # If no subcommand is found, none of the breaks are hit, and the else clause of the outer loop\n # is run, setting arg to None.\n\n for item in NON_ANIM_UTILS:\n for arg in sys.argv:\n if arg == item:\n break\n else:\n continue\n break\n else:\n arg = None\n\n return arg", "def _getCommand(self, cmd):\n try:\n cmd_str = cmd.decode('utf-8')\n return getattr(self, 'do_' + cmd_str, None)\n except:\n return None", "def setup_subparser(name, description, commands):\n subparser = SUBPARSER.add_parser(\n name,\n help=description\n )\n subparser.add_argument(\n 'sub_command',\n metavar='sub_command',\n type=str,\n nargs='+',\n help='Which command to run. 
Options: %s' % ', '.join(commands),\n choices=commands\n )\n\n return subparser", "def get_cmd(self):\n return self.cmds.pop(0) if self.cmds else None", "def _extract_command(self, args):\n opts = self.gopts[:]\n for cmd in self.ctable.values():\n opts.extend(cmd.opts)\n sfl, lfl, _ = self._compute_flags(opts, check_conflicts=False)\n\n lopts,largs = getopt.getopt(args, sfl, lfl)\n if not largs:\n return None\n return self._command(largs[0])", "def get(self, name):\n for func in (self.getarg, self.getflag, self.getcmd):\n try:\n return func(name)\n except KeyError:\n pass\n return None", "def add_subcommands(self, name='subcmd', arg_kws=None, optional=False):\n if self._subcmds is not None:\n raise RuntimeError(\"This config already has subcommands.\")\n if name in self.ReservedVariables or name[0] == '_':\n raise ValueError(\"Config variable name '%s' is reserved.\" % name)\n if name in self.confvariable:\n raise ValueError(\"Config variable '%s' is already defined.\" % name)\n if arg_kws is None:\n arg_kws = dict(title=\"subcommands\")\n else:\n arg_kws = dict(arg_kws)\n arg_kws['dest'] = name\n subparsers = self.argparser.add_subparsers(**arg_kws)\n var = ConfigSubCmds(name, optional, self, subparsers)\n self.confvariable[name] = var\n self.confvariables.append(var)\n self._subcmds = var\n return var", "def command(self, index):\n return self._commands[index]", "def AddSubCommand(self, command_info):\n name = command_info[2]\n self._commands_to_load[name] = command_info", "def GetCommand(name, database):\n value = database.GetValue(name)\n if(value == None):\n return \"Name not found\"\n else:\n return value", "def get_user_command(vcs_name):\n\n try:\n return [vcs['cmd'] for vcs in get_vcs_settings() if vcs.get('name') == vcs_name][0]\n except IndexError:\n return None", "def cmd(self, cmd_name):\n return 'deque.tube.{0}:{1}'.format(self.name, cmd_name)", "def get_command_name(args):\n\n # First argument would always be atlas or manage.py, i.e the calling interface\n if len(args) < 2:\n CommandError.print_to_err(f\"Name of command missing. Valid commands are - {VALID_COMMANDS}\")\n\n return args[1]", "def get_command(self, kword: str):\n # Step Zero is to make sure that the name does not belong to a REAL command.\n zero, mod = super().get_command(kword)\n if zero:\n return zero, mod\n\n # Otherwise, first, ensure that the keyword does in fact exist in the custom list.\n command = self.config.commands.get(kword, None)\n if not command:\n return None, None\n response = command[\"com\"]\n\n # Build the function to return the response. Note that \"self\" exists already.\n async def cmd_custom(args, src, **_):\n if args:\n member = self.get_member(src, args[0].strip())\n tag = member.mention if member else None\n else:\n tag = None\n\n nsfw = command.get(\"nsfw\", False)\n if nsfw and src.channel.id not in self.config.get(\"nsfwChannels\"):\n return None\n\n # Replace tags where needed.\n try:\n output = response.format(\n self=src.author.name,\n myID=src.author.id,\n tag=tag or src.author.mention,\n )\n except KeyError:\n return None\n else:\n return output\n\n # Specify the docstring and name so that !help will work on this.\n short = response.replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n if len(short) > 80:\n short = short[:77] + \"...\"\n cmd_custom.__doc__ = (\n \"__Custom command__: Return the following text: ```{}```\\n\\n\".format(short)\n + command.get(\n \"desc\",\n \"This is a custom command, so available help text is limited, but at the same time, the command is very simple. 
All it does is return a string, although the string may include formatting tags for invoker name, invoker ID, and a targeted mention.\",\n )\n + \"\\n\\nSyntax: `{p}\"\n + kword.lower()\n + (\" <user_ID>\" if \"{tag}\" in response else \"\")\n + \"`\"\n )\n cmd_custom.__name__ = \"cmd_\" + kword.lower()\n\n return cmd_custom, None", "def cog_subcommand(\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n base_default_permission: bool = True,\n base_permissions: typing.Dict[int, list] = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None,\n connector: dict = None,\n):\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n guild_ids = guild_ids if guild_ids else []\n if not base_permissions:\n base_permissions = {}\n\n def wrapper(cmd):\n decorator_permissions = getattr(cmd, \"__permissions__\", None)\n if decorator_permissions:\n base_permissions.update(decorator_permissions)\n\n desc = description or inspect.getdoc(cmd)\n if options is None:\n opts = manage_commands.generate_options(cmd, desc, connector)\n else:\n opts = options\n\n if guild_ids and not all(isinstance(item, int) for item in guild_ids):\n raise IncorrectGuildIDType(\n f\"The snowflake IDs {guild_ids} given are not a list of integers. Because of discord.py convention, please use integer IDs instead. Furthermore, the command '{name or cmd.__name__}' will be deactivated and broken until fixed.\"\n )\n\n _cmd = {\n \"func\": None,\n \"description\": base_description,\n \"guild_ids\": guild_ids.copy(),\n \"api_options\": [],\n \"default_permission\": base_default_permission,\n \"api_permissions\": base_permissions,\n \"connector\": {},\n \"has_subcommands\": True,\n }\n\n _sub = {\n \"func\": cmd,\n \"name\": name or cmd.__name__,\n \"description\": desc,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"guild_ids\": guild_ids,\n \"api_options\": opts,\n \"connector\": connector,\n }\n return CogSubcommandObject(base, _cmd, subcommand_group, name or cmd.__name__, _sub)\n\n return wrapper", "def create_command(cmd, args):\n\n\t\tfor cls in BaseCommand.__subclasses__():\n\t\t\tif cls.cmd() == cmd:\n\t\t\t\treturn cls(args)\n\n\t\treturn None", "def get_command(command):\n for _cmd in commands:\n if _cmd.command == command:\n return _cmd\n raise UserWarning(\"telegram command not found.\")", "def command():\n return _config.command", "def cmd(self, cmd):\n return cmd", "def GetFirstCommand(cmd):\n return _gmat_py.GetFirstCommand(cmd)", "def get_subkernel(self, name=None):\n subkernel = self._subkernels.get(name)\n if not subkernel:\n raise PickRegistrationException(f\"No subkernel named {name} found\")\n return subkernel", "def Visit(self, node, parent, is_group):\n command = cli_tree.Command(node, parent, include_hidden_flags=False)\n return command", "def get_cmd(self):\n return self.cmd", "def load_command_class(app_name, name):\n module = import_module('%s.management.commands.%s' % (app_name, name))\n return module.Command()", "def get_flag(flagname):\n if flagname in commands.keys():\n return commands[flagname]\n else:\n for cmdflag in commands.keys():\n if flagname in commands[cmdflag]['aliases']:\n return commands[cmdflag]", "def get_command(bare, path):\n\n if bare:\n cmd = [\"git\", \"fetch\"]\n return cmd\n\n directories 
= list_directories(os.path.join(path, git_signature))\n\n if \"svn\" in directories:\n cmd = [\"git\", \"svn\", \"rebase\"]\n else:\n cmd = [\"git\", \"pull\"]\n\n return cmd", "def subcommands(self) -> list[\"ProjectCommand\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"subcommands\", _args)\n _ctx = ProjectCommand(_ctx)._select_multiple(\n _description=\"description\",\n _name=\"name\",\n _result_type=\"resultType\",\n )\n return _ctx.execute_sync(list[ProjectCommand])", "def list_commands_by_name(self):\n return self._get(\"commandsByName\", ApiCommandMetadata, True, api_version=6)", "def find_subcommands():\n clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n subclasses = [cls for cls in clsmembers if issubclass(cls[1], (Subcommand, LocalSubcommand))\n and cls[1] not in [Subcommand, LocalSubcommand]]\n\n subcommands = []\n for subclass in subclasses:\n name = '-'.join([part.lower() for part in re.findall('[A-Z][a-z]*', subclass[0])])\n subcommands.append((name, subclass[1]))\n return subcommands", "def command(self, name, method_name=None, **kwargs):\n return self._command(name, method_name=method_name, **kwargs)", "def call_command_direct(self, name, args=None, kwargs=None):\n comm,_=self._commands[name]\n return comm(*(args or []),**(kwargs or {}))", "def add_subcommand(self, cmd):\n self.subcommands.append(cmd)", "def get_arg(self, name):\n return getattr(self.args, f\"{self.key}_{self.alias}_{name}\")", "def _get_cmd(cls, command, f_config, verbose=False):\n if command not in cls.COMMANDS:\n raise KeyError('Could not recongize command \"{}\". '\n 'Available commands are: {}'\n .format(command, cls.COMMANDS))\n cmd = cls.CMD_BASE.format(fp_config=f_config, command=command)\n if verbose:\n cmd += ' -v'\n\n return cmd", "def getCommand(self):\n return self.__cmd", "def get_cmd(self, cmd_class, argument_string=\"\"): \n cmd = cmd_class()\n cmd.caller = self.char1\n cmd.cmdstring = cmd_class.key\n cmd.args = argument_string\n cmd.cmdset = None\n cmd.obj = self.char1\n return cmd", "def command_name(self):\n return None", "def extractCommands(controlName, includeName = True):\n return (_extractCommands(controlName, includeName))", "def command(self, function=None, name=None):\r\n if name is None:\r\n return self._command(function)\r\n else:\r\n return partial(self._command, name=name)", "def get_commandname(self):\n for line in self.helplines:\n if \"Usage:\" in line and self.parser_type is 'optparse':\n tmp = line.split()\n return tmp[1]\n if \"usage:\" in line and self.parser_type is 'argparse':\n tmp = line.split()\n return tmp[1]\n return None", "def register_command(name):\n\n def register(cmd):\n Facade().register_command(name, cmd)\n return cmd\n\n return register", "def select_module(module, name):\n mod_name = \".\".join([\"biobox_cli\", module, name])\n try:\n __import__(mod_name)\n except ImportError:\n err_exit('unknown_command',\n {'command_type': str.replace(module, '_', ' '), 'command': name})\n return sys.modules[mod_name]", "def __getattr__(self, name):\n try:\n return self[self.sig.argpos(name)]\n except:\n pass\n return BasicCall.__getattr__(self, name)", "def wrapper(callback):\n if self.subcommands.get(base_name) is None:\n self.subcommands[base_name] = {}\n\n self.subcommands[base_name][name] = SubSlashCommand(callback, base_name, name, description, options=options, guild_ids=guild_ids, default_permission=default_permission, guild_permissions=guild_permissions)", "def get_command_template(\n project_dictionary: Dictionaries, command_name: 
str\n ) -> CmdTemplate:\n return project_dictionary.command_name[command_name]", "def GetSubkeyByName(self, name):", "def get_command_called(self, slack_message: str) -> _SingleCommand:\n for command in self:\n command_part, _ = command.split_message(slack_message)\n if command_part:\n return command", "def get_cmd(self, action=None, has_prompt=False, undo_unit=False,\r\n flush=False, display=True):\r\n if flush and self.select_cmd is not None:\r\n self.do_cmd()\r\n if action is None:\r\n cmd = self.select_cmd\r\n if cmd is None:\r\n raise SelectError(\"get_cmd: No name for SelectCommand\")\r\n return cmd\r\n else:\r\n if self.select_cmd is not None:\r\n raise SelectError(\"get_cmd: previous cmd(%s) not completed\"\r\n % self.select_cmd)\r\n self.select_cmd = SelectCommandPlay(action, has_prompt=has_prompt,\r\n undo_unit=undo_unit,\r\n display=display)\r\n return self.select_cmd", "def command_from_module_name(module_name):\n if module_name == '__main__':\n return os.path.basename(TAUCMDR_SCRIPT)\n return ' '.join(_command_as_list(module_name))", "def get_command(self, player):\n return super().get_command(player)", "def execute(name, patterns):\n for pat in patterns:\n m = re.search(pat[0], name)\n if m is not None:\n cmd = pat[1][0]\n args = pat[1][1]\n kwargs = pat[1][2]\n kwargs.update(m.groupdict())\n return cmd(*args, **kwargs)\n raise NoSuchCommand(u\"Can’t find command: %s\" % name)", "def _get_commands(package_name):\n # pylint: disable=line-too-long\n def lookup(cmd, dct):\n if not cmd:\n return dct\n if len(cmd) == 1:\n return dct[cmd[0]]\n return lookup(cmd[1:], dct[cmd[0]])\n\n def walking_import(module, cmd, dct):\n car, cdr = cmd[0], cmd[1:]\n if cdr:\n walking_import(module, cdr, dct[car])\n elif car not in dct:\n __import__(module)\n dct.setdefault(car, {})['__module__'] = sys.modules[module]\n\n __import__(COMMANDS_PACKAGE_NAME)\n command_module = sys.modules[COMMANDS_PACKAGE_NAME]\n for _, module, _ in pkgutil.walk_packages(command_module.__path__, prefix=command_module.__name__+'.'):\n if not (module.endswith('__main__') or '.tests' in module):\n try:\n lookup(_command_as_list(module), _COMMANDS)\n except KeyError:\n walking_import(module, _command_as_list(module), _COMMANDS)\n return lookup(_command_as_list(package_name), _COMMANDS)" ]
[ "0.8886178", "0.8527717", "0.79279536", "0.7438187", "0.735898", "0.7330646", "0.72542876", "0.7133576", "0.70916045", "0.70674455", "0.7017528", "0.6924694", "0.6864823", "0.6756936", "0.6737042", "0.6726676", "0.6696921", "0.6614363", "0.66121435", "0.66044974", "0.65040296", "0.6452745", "0.6230085", "0.6170363", "0.61311877", "0.61185974", "0.6096523", "0.60777205", "0.605532", "0.60272646", "0.6018002", "0.6002105", "0.6000813", "0.5963563", "0.5922691", "0.59072715", "0.5896054", "0.58871", "0.5873227", "0.5835965", "0.58335614", "0.5830731", "0.5818682", "0.5797172", "0.57718515", "0.5759713", "0.57579195", "0.5747406", "0.57381445", "0.5737984", "0.5729612", "0.5718977", "0.57107955", "0.5706059", "0.5697263", "0.5688041", "0.56667525", "0.56585824", "0.5649262", "0.561345", "0.56071275", "0.560391", "0.55956596", "0.5592454", "0.55924404", "0.55845654", "0.55819666", "0.5546089", "0.5533427", "0.55249995", "0.55184436", "0.5509348", "0.55065644", "0.54811347", "0.54760146", "0.5472379", "0.5462319", "0.54591435", "0.5456995", "0.5452272", "0.54493475", "0.54426855", "0.5431994", "0.5398848", "0.53933185", "0.5380811", "0.53758675", "0.5373158", "0.5369558", "0.5360441", "0.5353694", "0.5351359", "0.53505194", "0.5347031", "0.53445673", "0.5344064", "0.53221273", "0.53132725", "0.53060174", "0.53022844" ]
0.8167717
2
Determine whether the current query is of a subcommand. If so, first return the corresponding SubcommandQuery object.
def get_subcommand_query(query_str: str) -> Optional[SubcommandQuery]: if not query_str: return None # spilt: # "subcommand_name rest of query" -> ["subcommand_name", "rest of query""] query_parts = query_str.strip().split(None, maxsplit=1) if len(query_parts) < 2: query_str = "" else: query_str = query_parts[1] subcommand = get_subcommand_for_name(query_parts[0]) if subcommand: return SubcommandQuery(subcommand=subcommand, query=query_str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_sub_commands(self) -> bool:\n if self.__dict__.get(\"sub_commands\"):\n return True\n\n return False", "def _subcommand_for_name(self, name):\n for subcommand in self.subcommands:\n if name == subcommand.name or \\\n name in subcommand.aliases:\n return subcommand\n return None", "def _find_subcommand(args):\n subcmd = args[1]\n if subcmd in [\n \"cfg\"\n # , 'init',\n ]:\n return subcmd\n else:\n return None", "def subcmd(self) -> Optional[str]:\n return self._subcmd", "def get_subcommand_for_name(name: str) -> Optional[Subcommand]:\n matching = [s for s in subcommands if s.name.lower() == name.lower()]\n if matching:\n return matching[0]", "def is_command(schema_obj):\n\n return isinstance(schema_obj, schema.Command)", "def fetch_subcommand(self, name):\n try:\n subcommand_class = self.subcommands[name]\n except KeyError:\n self.print_command_unkown_error(name)\n sys.exit(1)\n return subcommand_class(self.prog, name, self.argv[2:], self.stdout)", "def is_in_cmd(self):\r\n return self.select_cmd is not None", "def get_subcmd(self, name: str) -> \"CommandHelp\":\n try:\n return self.subcmds[name]\n except KeyError:\n # Try looking up by alias\n for sub_name, sub_help in self.subcmds.items():\n for alias in sub_help.aliases:\n if name == alias:\n return self.subcmds[sub_name]\n raise", "def _is_command(obj, cli):\n if not inspect.isfunction(obj) or obj.__name__.startswith(\"_\"):\n return False\n return hasattr(obj, \"__module__\") and obj.__module__ == cli.__name__", "def __Ancestor(self, flag):\n command = self._parent\n while command:\n if flag in command.flags:\n return True\n command = command._parent # pylint: disable=protected-access\n return False", "def _is_command(self, ext):\n try:\n return issubclass(ext, CommandExtension)\n except TypeError:\n return False", "def test_subCommandInTwoPlaces(self):\n class SubOpt(usage.Options):\n pass\n class OptFoo(usage.Options):\n subCommands = [\n ('foo', 'f', SubOpt, 'quux'),\n ]\n class OptBar(usage.Options):\n subCommands = [\n ('bar', 'b', SubOpt, 'quux'),\n ]\n oFoo = OptFoo()\n oFoo.parseOptions(['foo'])\n oBar=OptBar()\n oBar.parseOptions(['bar'])\n self.failUnless(hasattr(oFoo.subOptions, 'parent'))\n self.failUnless(hasattr(oBar.subOptions, 'parent'))\n self.failUnlessIdentical(oFoo.subOptions.parent, oFoo)\n self.failUnlessIdentical(oBar.subOptions.parent, oBar)", "def fetch_command(self, global_options, subcommand):\r\n commands = self.get_commands(global_options)\r\n try:\r\n klass = commands[subcommand]\r\n except KeyError:\r\n sys.stderr.write(\"Unknown command: %r\\nType '%s help' for usage.\\nMany commands will only run at project directory, maybe the directory is not right.\\n\" % \\\r\n (subcommand, self.prog_name))\r\n sys.exit(1)\r\n return klass", "def fetch_command(self, subcommand):\n # Get commands outside of try block to prevent swallowing exceptions\n commands = get_commands()\n try:\n app_name = commands[subcommand]\n except KeyError:\n possible_matches = get_close_matches(subcommand, commands)\n sys.stderr.write(\"Unknown command: %r\" % subcommand)\n if possible_matches:\n sys.stderr.write(\". 
Did you mean %s?\" % possible_matches[0])\n sys.stderr.write(\"\\nType '%s help' for usage.\\n\" % self.prog_name)\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass", "def is_subcall(self):\n return False", "def is_command_response(schema_obj):\n\n return isinstance(schema_obj, schema.CommandResponse)", "def subectIsSelf():", "def is_Q(self):\n return isinstance(self,Q)", "def _dispatching(self):\n return bool(self.generate_config or self.subapp or self.subcommand)", "def fetch_command(self, subcommand):\n try:\n app_name = get_commands()[subcommand]\n except KeyError:\n sys.stderr.write(\"Unknown command: %r\\nType '%s help'\"\n \" for usage.\\n\" % \\\n (subcommand, self.prog_name))\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass", "def is_cmd(self, name):\n \n return name in self.cmds", "def get_command(self):\n if self.command is not None:\n return self.command\n elif self.parent is not None:\n return self.parent.get_command()\n else:\n return None", "def responds_to(self, command) -> bool:\n return command == self.command and self.active is True and self.command is not None", "def nested_subcmd(self, depth: int = 2) -> Optional[str]:\n # pylint: disable=protected-access\n current = 0\n subparser = self.parser\n try:\n while current < depth:\n action = subparser._actions[0]\n if isinstance(action, _SubParsersAction):\n subparser = action.choices[self.args[action.dest]]\n current += 1\n else:\n return None\n return subparser.name.split()[-1]\n except (IndexError, KeyError, TypeError):\n return None", "def __init__(self, subcommand: Subcommand, query: str):\n\n self.command = subcommand\n self.query = query", "def is_valid_command(args):\n if args.command is not None:\n return True\n return False", "def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_executes(args, skip=True)\n ):\n return True\n return False", "def is_command(self, text):\n return text.split(' ', 1)[0].startswith(\"!\")", "def _iscommand(self, key):\r\n\t\tyes = False\r\n\t\tfor i in COMMAND_NAME.keys():\r\n\t\t\tif key == i: \r\n\t\t\t\tyes = True; break\r\n\t\treturn yes", "def call_subshell(subshell):\n curses.def_prog_mode()\n #curses.endwin() # Probably causes a memory leak.\n\n rtn = os.system(\"%s\" % (subshell))\n curses.reset_prog_mode()\n if rtn is not 0:\n return False\n else:\n return True", "def _msg_is_command(self, msg):\n return isinstance(msg, dict)", "def commands_for_submode(prefix):\n candidates = _lookup_command_candidates(prefix, command_registry)\n # print type(candidates), [x['self'] for x in candidates]\n # print [x.get('all-help') for x in candidates]\n if debug.cli():\n # print 'commands_for_submode:', sorted(dict([[x['name']['title'] if type(x['name']) == dict else x['name'], None] for x in candidates]).keys())\n pass\n return candidates", "def find_subcommands():\n clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n subclasses = [cls for cls in clsmembers if issubclass(cls[1], (Subcommand, LocalSubcommand))\n and cls[1] not in [Subcommand, LocalSubcommand]]\n\n subcommands = []\n for subclass in subclasses:\n name = '-'.join([part.lower() for part in 
re.findall('[A-Z][a-z]*', subclass[0])])\n subcommands.append((name, subclass[1]))\n return subcommands", "def test_get_invalid_subcommand_help(self):\r\n self.assertEqual(self.testcommand.get_help(),\r\n self.testcommand.get_help(subcommand=\"foo\"))", "def check_command(self):\n return self.process is not None and self.process.poll() is None", "def _is_pop_command(self):\n return self._match_memory_pattern(\"pop\")", "def _subcommand_name(ignore=()):\n NON_ANIM_UTILS = [\"cfg\", \"--help\", \"-h\"]\n NON_ANIM_UTILS = [util for util in NON_ANIM_UTILS if util not in ignore]\n\n # If a subcommand is found, break out of the inner loop, and hit the break of the outer loop\n # on the way out, effectively breaking out of both loops. The value of arg will be the\n # subcommand to be taken.\n # If no subcommand is found, none of the breaks are hit, and the else clause of the outer loop\n # is run, setting arg to None.\n\n for item in NON_ANIM_UTILS:\n for arg in sys.argv:\n if arg == item:\n break\n else:\n continue\n break\n else:\n arg = None\n\n return arg", "def known_command(self, command):\n return self._known_command(command, self.do_command)", "def get_command(self, ctx: Context, cmd_name: str) -> Command:\n next_command = self.command.pop(0)\n if not self.command:\n return DocsBaseCommand(\n name=next_command,\n short_help=f\"Documentation for {self.command_string}\",\n callback=self.command_callback,\n )\n return DocsSubCommand(command=self.command)", "def __get_general_subscr_info(self):\n query = (\"SELECT d.datname, r.rolname, s.subenabled, \"\n \"s.subconninfo, s.subslotname, s.subsynccommit, \"\n \"s.subpublications FROM pg_catalog.pg_subscription s \"\n \"JOIN pg_catalog.pg_database d \"\n \"ON s.subdbid = d.oid \"\n \"JOIN pg_catalog.pg_roles AS r \"\n \"ON s.subowner = r.oid \"\n \"WHERE s.subname = %(name)s AND d.datname = %(db)s\")\n\n result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False)\n if result:\n return result[0]\n else:\n return False", "def querySubdiv(*args, action: int=0, level: int=0, relative: bool=True, **kwargs)->bool:\n pass", "def IsValidSubPath(self, command_path):\n current = self\n for part in command_path:\n current = current.LoadSubElement(part)\n if not current:\n return False\n return True", "def test_get_invalid_subcommand_help(self):\n self.assertEqual(self.testcommand.get_help(),\n self.testcommand.get_help(subcommand=\"foo\"))", "def has_command_with_name(self, command_name):\n return command_name in self.commands", "def isSuper(self):\n user = self.getSession()\n return self.pipe.auth.isSuper(user)", "def test_no_query_passed(self, api_client):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.helper.sys\") as sys:\n sys.stdin.isatty.return_value = True\n result = runner.invoke(\n subcommand.query, parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise query\" in result.output\n api_client.query.assert_not_called()", "def run(self):\n if self.subcommand_parser is None:\n self.exit(1, 'Command defines no subcommands')\n\n args = self.parse_args()\n if args.command is None:\n self.exit(1, 'No command selected')", "def get_command(self, object_name, user_key = None):\n\t\treturn self.get_object('command',object_name, user_key = user_key)", "def construct_subcommand(\n parser,\n hooks=None,\n arg_filter=None,\n is_root=True\n):\n subcommands = []\n options = []\n args = []\n subcommand = {}\n hooks = {} if hooks is None else hooks\n 
subcommand_hook = hooks.get(\"subcommand\")\n\n if is_root:\n subcommand[\"name\"] = parser.prog\n\n for arg in parser._actions:\n if arg_filter is not None and arg_filter(arg):\n continue\n if arg.nargs == argparse.PARSER:\n subcommand.update(get_base_suggestion(arg))\n help_map = {a.dest: a.help for a in arg._choices_actions}\n\n nested_subcommands = {}\n for name, nested_parser in arg.choices.items():\n if nested_parser in nested_subcommands:\n nested_subcommands[nested_parser][\"name\"].append(name)\n else:\n nested_subcommands[nested_parser] = {\n \"name\": [name],\n **construct_subcommand(\n nested_parser,\n hooks=hooks,\n arg_filter=arg_filter,\n is_root=False\n ),\n }\n if name in help_map and help_map[name] != argparse.SUPPRESS:\n nested_subcommands[nested_parser][\"description\"] = str(help_map[name])\n for p, nested_subcommand in nested_subcommands.items():\n if len(nested_subcommand[\"name\"]) == 1:\n nested_subcommand[\"name\"] = nested_subcommand[\"name\"][0]\n if subcommand_hook:\n subcommand_hook(nested_subcommand, p)\n subcommands.append(nested_subcommand)\n elif arg.option_strings:\n options.append(construct_option(arg, hooks, parser))\n else:\n args.extend(construct_args(arg, hooks, parser))\n\n if subcommands:\n subcommand[\"subcommands\"] = subcommands\n if options:\n subcommand[\"options\"] = options\n if args:\n subcommand[\"args\"] = args\n\n if is_root and subcommand_hook:\n subcommand_hook(subcommand, parser)\n\n return subcommand", "def get_command(self):\n req_type = type(self.req)\n\n if req_type == ureq.CreateEntryRequest:\n return commands.CreateCommand(self.req.results)\n elif req_type == ureq.ReadEntryRequest:\n return commands.ReadCommand(self.req.results)\n elif req_type == ureq.UpdateEntryRequest:\n return commands.UpdateCommand(self.req.results)\n elif req_type == ureq.DeleteEntryRequest:\n return commands.DeleteCommand(self.req.results)", "def test_subcommand_conflict(self):\n subcommand = {\n var: cli_parser.__dict__.get(var)\n for var in cli_parser.__dict__\n if var.isupper() and var.startswith(\"COMMANDS\")\n }\n for group_name, sub in subcommand.items():\n name = [command.name.lower() for command in sub]\n assert len(name) == len(set(name)), f\"Command group {group_name} have conflict subcommand\"", "def is_command(line: str) -> bool:\n if line[0] == \"$\":\n return True\n return False", "async def cool(ctx):\n if ctx.invoked_subcommand is None:\n await bot.say('No, {0.subcommand_passed} is not cool'.format(ctx))", "def valid_command(command):\n\n (command_name, arg1) = split_command_input(command)\n\n slipt_arg1 = arg1.split('-')\n digit = ''\n if \" \" in arg1:\n (digit, rev) = arg1.split(' ')\n \n\n\n return command_name.lower() in valid_commands and (len(arg1) == 0 or is_int(arg1)\\\n or arg1.lower() == 'silent' or arg1.lower() == 'reversed' or arg1.lower() \\\n == 'reversed silent' or (is_int(slipt_arg1[0]) and is_int(slipt_arg1[1]))\\\n or (is_int(digit) and rev == 'reversed') or (is_int(digit) and rev == 'silent'))", "def subcommand(wrapped):\n def callback(scanner, name, ob):\n scanner.subcommands[ob.name] = ob\n venusian.attach(wrapped, callback, category='subcommands')\n return wrapped", "def hasCommand():\n args = sys.argv[1:]\n if '--help' in args:\n return False\n if '-h' in args:\n return False\n for arg in args:\n if arg and not arg.startswith('-'):\n return True\n return False", "async def event(self, ctx):\r\n if ctx.invoked_subcommand is None:\r\n await self.bot.say('No, {0.subcommand_passed} is not cool'.format(ctx))", "def 
_isCmdStandalone(tgen):\n features = getattr(tgen, 'features', [])\n otherFeatures = set(features) - set(('runcmd', ))\n return not otherFeatures and getattr(tgen, 'rule', None) is None", "def is_command(oin, env, pred_name: YPredName, arg: Any=None):\n return (env.check_predicate(obj, pred_name, arg) for obj in oin)", "def test_axiomaticSubcommand(self):\n subCommands = AxiomaticOptions().subCommands\n [options] = [cmd[2] for cmd in subCommands if cmd[0] == 'port']\n self.assertIdentical(options, PortConfiguration)", "def test_get_subcommand_help(self):\n subcommands = list(self.testcommand.subparser.choices.keys())\n for subcommand in subcommands:\n help_message = self.testcommand.get_help(subcommand=subcommand)\n self.assertEqual(1, help_message.count(\"usage\"))", "def has_command(self, command):\n for pbt in self._plugins.values():\n if pbt.command == command:\n return True\n return False", "def is_of_type(cmd):\r\n raise NotImplementedError()", "def is_command(text):\n return text.startswith('/')", "def is_command(text):\n return text.startswith('/')", "def main(ctx):\n\n print(\"Mode:\", ctx.invoked_subcommand)", "def test_get_subcommand_help(self):\r\n subcommands = list(self.testcommand.subparser.choices.keys())\r\n for subcommand in subcommands:\r\n help_message = self.testcommand.get_help(subcommand=subcommand)\r\n self.assertEqual(1, help_message.count(\"usage\"))", "def find_command(cmd):\n if cmd:\n root = '.'.join([COMMANDS_PACKAGE_NAME] + cmd)\n else:\n root = COMMANDS_PACKAGE_NAME\n try:\n return _get_commands(root)['__module__'].COMMAND\n except KeyError:\n LOGGER.debug('%r not recognized as a TAU command', cmd)\n resolved = _resolve(cmd, cmd, _COMMANDS[SCRIPT_COMMAND])\n LOGGER.debug('Resolved ambiguous command %r to %r', cmd, resolved)\n return find_command(resolved)\n except AttributeError as err:\n raise InternalError(\"'COMMAND' undefined in %r\" % cmd) from err", "def do_known_command(self, cmd):\n if cmd in self.commands:\n return \"true\", True\n else:\n return \"false\", True", "def subectIsSelf():\n return (isinstance(subject, PartyProxy))", "def validate_subcommand(\n commands: Sequence[str], workflows: Sequence[str], subcommand: str\n) -> None:\n if not commands and not workflows:\n msg.fail(f\"No commands or workflows defined in {PROJECT_FILE}\", exits=1)\n if subcommand not in commands and subcommand not in workflows:\n help_msg = []\n if subcommand in [\"assets\", \"asset\"]:\n help_msg.append(\"Did you mean to run: python -m spacy project assets?\")\n if commands:\n help_msg.append(f\"Available commands: {', '.join(commands)}\")\n if workflows:\n help_msg.append(f\"Available workflows: {', '.join(workflows)}\")\n msg.fail(\n f\"Can't find command or workflow '{subcommand}' in {PROJECT_FILE}\",\n \". 
\".join(help_msg),\n exits=1,\n )", "def execute(self):\n\n options, args = self.parser.parse_args(self.argv)\n\n try:\n subcommand_name = self.argv[1]\n except IndexError:\n subcommand_name = 'help'\n\n if subcommand_name == 'help':\n if len(args) <= 2:\n self.print_help()\n else:\n self.fetch_subcommand(self.argv[2]).print_help()\n elif subcommand_name == 'version':\n self.print_version()\n else:\n self.fetch_subcommand(subcommand_name).execute()", "def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"command\")", "def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"command\")", "def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"command\")", "def is_valid_command(self, word: str, *, is_subcommand: bool = False) -> Tuple[bool, str]:\n valid = False\n\n if not isinstance(word, str):\n return False, f'must be a string. Received {str(type(word))} instead' # type: ignore[unreachable]\n\n if not word:\n return False, 'cannot be an empty string'\n\n if word.startswith(constants.COMMENT_CHAR):\n return False, 'cannot start with the comment character'\n\n if not is_subcommand:\n for shortcut, _ in self.shortcuts:\n if word.startswith(shortcut):\n # Build an error string with all shortcuts listed\n errmsg = 'cannot start with a shortcut: '\n errmsg += ', '.join(shortcut for (shortcut, _) in self.shortcuts)\n return False, errmsg\n\n errmsg = 'cannot contain: whitespace, quotes, '\n errchars = []\n errchars.extend(constants.REDIRECTION_CHARS)\n errchars.extend(self.terminators)\n errmsg += ', '.join([shlex.quote(x) for x in errchars])\n\n match = self._command_pattern.search(word)\n if match:\n if word == match.group(1):\n valid = True\n errmsg = ''\n return valid, errmsg", "def expects_result(self, command):\n return isinstance(command, (self.package(\"Syntax\").Operator,\n self.package(\"Syntax\").Formule))", "def __is_active(self, command):\n return True", "def subresource(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subresource\")", "def is_valid_command(self, string):\n return string[:3] == \"--!\"", "def _get_command(self, message, db_session):\n first_word = self.ts.get_human_readable_message(message).split(' ')[0]\n if len(first_word) > 1 and first_word[0] == '!':\n potential_command = first_word[1:].lower()\n else:\n return None\n if potential_command in self.sorted_methods['for_all']:\n return [potential_command, 'for_all']\n if potential_command in self.sorted_methods['for_mods']:\n return [potential_command, 'for_mods']\n db_result = db_session.query(db.Command).filter(db.Command.call == potential_command).all()\n if db_result:\n return [potential_command, db_result[0]]\n return None", "def queryable(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"queryable\")", "def subshell(cmd, check=True, stderr=None, strip=True):\n eprint(f\"+ $({cmd})\")\n result = run(cmd, shell=True, stdout=PIPE, stderr=stderr, encoding=\"utf8\")\n if result.returncode != 0 and check:\n if stderr == PIPE:\n eprint(result.stderr)\n eprint(result.stdout)\n raise UserError(f\"Exit code {result.returncode}: {cmd}\")\n out = result.stdout\n if strip:\n out = out.strip()\n return out", "async def cool(ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send('No, {0.subcommand_passed} is not cool'.format(ctx))", "def is_cli():\n return bool(_environ.get(\"ACCELPY_CLI\", False))", "def cli():\n pass # do nothing here, it just 
defines the name for other subcommands", "def _isSubProcessRunning(self): \n # Check if child process has terminated. Set and return returncode attribute.\n if self.__process.poll() is None:\n return True\n else:\n return False", "def get_command(self, ctx, cmd_name):\n path = \"%s.%s\" % (__name__, cmd_name)\n path = path.replace(\"-\", \"_\")\n try:\n module = importlib.import_module(path)\n return getattr(module, 'cli')\n except ModuleNotFoundError as ex:\n print(ex.name)\n return None", "def add_subcommand(self, command):\n\n if self.subcommand_parser is None:\n self.subcommand_parser = self.parser.add_subparsers(\n dest='command', help='Please select one command mode below',\n title='Command modes'\n )\n self.subcommands = {}\n\n if not isinstance(command, ScriptCommand):\n raise ScriptError('Subcommand must be a ScriptCommand instance')\n\n parser = self.subcommand_parser.add_parser(\n command.name,\n help=command.short_description,\n description=command.description,\n epilog=command.epilog,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n self.subcommands[command.name] = command\n command.script = self\n\n if callable(getattr(command, '__register_arguments__', None)):\n command.__register_arguments__(parser)\n\n return parser", "def subpros(cmd):\n a = subprocess.Popen(cmd, shell = False, stdout = subprocess.PIPE)\n #a.wait()\n if a.poll() == 0 or a.poll() == None: #假如cmd命令执行成功,返回0\n #self.loger(self.curtime()+ \"\\t\" + u\"连接subprocess成功\")\n b = a.stdout.read()\n try:\n if str(b.split(\":\")[0]) != \"Error\":\n print u\"连接subprocess执行命令\\\"%s\\\"\"%cmd\n #self.loger(self.curtime()+ \"\\t\" + u\"连接subprocess执行命令\\\"%s\\\"\"%cmd)\n else:\n print u\"执行命令\\\"%s\\\"时\\n\"%cmd + '\\t出现错误:\\n' + b\n #self.loger(self.curtime()+ \"\\t\" + u\"执行命令\\\"%s\\\"时\\n\"%cmd + '\\t出现错误:\\n' + b)\n except: pass\n else:\n #dlg = wx.MessageDialog(None, u'无法连接设备!\\n\\n请查看ADB线是否插上',\n # 'MessageDialog', wx.OK | wx.ICON_QUESTION)\n #result = dlg.ShowModal()\n #dlg.Destroy()\n #self.loger(self.curtime()+ \"\\t\" + u'无法连接设备!请查看ADB线是否插上' + '\\n')\n b = False\n try: a.kill()\n except: pass\n return b", "def extend_cli(self, subparser):", "def has_stp_cli(self):\n if self.is_escom_l:\n cmd = self.cli(\"show spanning-tree\")\n return \"Spanning tree enabled\" in cmd\n else:\n cmd = self.cli(\"show spanning-tree active\")\n return \" enabled \" in cmd", "def cmd(self):\n orig_cmd = super().cmd\n found = shutil.which(orig_cmd)\n return found if found is not None else orig_cmd", "def subcommands(self) -> list[\"ProjectCommand\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"subcommands\", _args)\n _ctx = ProjectCommand(_ctx)._select_multiple(\n _description=\"description\",\n _name=\"name\",\n _result_type=\"resultType\",\n )\n return _ctx.execute_sync(list[ProjectCommand])", "def load_into(subparser, as_cmd=None):\n p = subparser\n p.description = description\n\n if not as_cmd:\n as_cmd = default_name\n out = cli.CommandSuite(as_cmd, p)\n out.load_subcommand(topics)\n return out", "def execute(self):\n try:\n subcommand = self.argv[1]\n except IndexError:\n subcommand = \"help\" # Display help if no arguments were given.\n\n # These options could affect the commands that are available, so they\n # must be processed early.\n parser = CommandParser(\n prog=self.prog_name,\n usage=\"%(prog)s subcommand [options] [args]\",\n add_help=False,\n allow_abbrev=False,\n )\n parser.add_argument(\"args\", nargs=\"*\") # catch-all\n try:\n options, args = parser.parse_known_args(self.argv[2:])\n except 
CommandError:\n pass # Ignore any option errors at this point.\n\n if subcommand == \"help\":\n if \"--commands\" in args:\n sys.stdout.write(self.main_help_text(commands_only=True) + \"\\n\")\n elif not options.args:\n sys.stdout.write(self.main_help_text() + \"\\n\")\n else:\n self.fetch_command(options.args[0]).print_help(\n self.prog_name, options.args[0]\n )\n elif subcommand == \"version\" or self.argv[1:] == [\"--version\"]:\n sys.stdout.write(get_named_version() + \"\\n\")\n elif self.argv[1:] in ([\"--help\"], [\"-h\"]):\n sys.stdout.write(self.main_help_text() + \"\\n\")\n else:\n self.fetch_command(subcommand).run_from_argv(self.argv)", "def has_commands(self) -> bool:\n return len(self.commands) > 0", "def __commandExists(self, command, cmdtype):\n try:\n # method exists\n if hasattr(self, self.__getFullCommandName(command, cmdtype)):\n # command handler type exists\n if self.__commandHandlerTypeExists(cmdtype):\n return True\n else:\n return False\n else:\n return False\n # any key does not exist\n except KeyError:\n return False", "def is_valid_command(command):\n return is_get(command) or is_insert(command) or is_update(command) or is_delete(command) or is_showall(command) or is_search(command)" ]
[ "0.65202355", "0.6357731", "0.63516015", "0.6259924", "0.60126", "0.59781826", "0.5967946", "0.5843146", "0.5827129", "0.5752442", "0.5749248", "0.57485867", "0.5696898", "0.5505429", "0.5458622", "0.53960603", "0.5385143", "0.53838754", "0.5379334", "0.53359336", "0.5302235", "0.5151404", "0.514488", "0.5140806", "0.5090426", "0.50821155", "0.50634664", "0.5018499", "0.5016916", "0.49812126", "0.49619937", "0.49424753", "0.49339736", "0.49279717", "0.4927806", "0.49250844", "0.4924172", "0.48997256", "0.48974133", "0.4894102", "0.4891074", "0.4871291", "0.48591062", "0.48573297", "0.48556888", "0.48553634", "0.48507002", "0.48418295", "0.4837338", "0.48081705", "0.48063722", "0.47767386", "0.4773098", "0.47657436", "0.4760418", "0.4756634", "0.47522107", "0.47389463", "0.47270718", "0.47226927", "0.47218487", "0.47211972", "0.47210985", "0.4716989", "0.4711186", "0.4711186", "0.4709974", "0.4702901", "0.46996763", "0.46731922", "0.46694314", "0.46638942", "0.46628794", "0.46525756", "0.46525756", "0.46525756", "0.46467242", "0.46355948", "0.46355772", "0.4634129", "0.46311876", "0.46263105", "0.46255696", "0.46200106", "0.46058443", "0.4600308", "0.459228", "0.45911208", "0.4586912", "0.4585162", "0.4583987", "0.4577361", "0.45772177", "0.45766088", "0.45712528", "0.45709753", "0.45621803", "0.4560813", "0.4557033", "0.45563662" ]
0.6729017
0
Establishes an upper limit for the for-loop that parses the SWMM input file
def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']): global count with open(inputfilename, 'r') as swmmput: contents = swmmput.readlines() count = len(contents) return(count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _uppLim(self):\n if self.getResult(param='TS value')[0] >= self.tsmin:\n print(\"\\t=== TS value {} is above TSmin {}, no need to compute an upperlimit ===\"\n .format(self.getResult(param='TS value')[0], self.tsmin))\n return\n\n from UpperLimits import UpperLimits\n import UnbinnedAnalysis as UA\n \n like = UA.unbinnedAnalysis(evfile=self.outmktime, scfile=self.ft2, expmap=self.outexpmap,\n expcube=self.outltcube, irfs=self.irf, optimizer=\"NewMinuit\", srcmdl=self.model)\n like.fit(0)\n ul = UpperLimits(like)\n\n try:\n upp, norm=ul['TARGET'].bayesianUL(emin=self.emin, emax=self.emax, cl=0.95) \n except:\n upp = -1\n wf = open(self.outgtlike, 'a')\n wf.write(\"\\nUpper limit on source 'TARGET': {} ph/cm2/s.\".format(upp))\n wf.close()\n return", "def mbieLoop (self) :\n self.iterCnt = 0\n while self.iterCnt < 5000:\n s = self.mdp.s0\n for h in range(self.H) :\n self.QUpper = QBoundsSolver(self.mdp, self.PHat, self.QUpper, self.Ntotal, 0.1, True, self.stop)\n a = np.argmax(self.QUpper[s])\n s_, self.R[s,a] = self.mdp.step(s, a)\n self.updateVisitStatistics(s, a, s_)\n s = s_\n\n if self.iterCnt % 10 == 0: \n print(self.iterCnt)\n print(self.QUpper)\n\n self.iterCnt += 1", "def STAND_LIMIT() -> int:\n return 15", "def get_max_iters():\n return 2000", "def test_mem_limit_too_high():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'mem-limit-too-high.ini'))\n with pytest.raises(UserReportError) as err:\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n assert err.value.returncode == INPUT_ERROR\n m = re.match(r'Memory limit.*exceeds', err.value.message)\n assert m is not None", "def test_max_N_too_small(self):\n\t\t\n\t\t\n\t\tparams = DEFAULT_PARAMS.copy()\n\t\tparams[MAX_N] = DEFAULT_MAX_EVALS+1\n\t\t\n\t\titerator = self.watcher.make_layer_iterator(model=self.model, params=params)\n\t\tfor ww_layer in iterator:\n\t\t\tif ww_layer.N > params[MAX_N]:\n\t\t\t\tself.assertTrue(ww_layer.skipped)\n\t\t\n\t\tdetails = self.watcher.describe(max_N=DEFAULT_MAX_EVALS+1)\n\t\tprint(details[['N','M']])\n\t\tself.assertEqual(10,len(details))\n\n\t\treturn", "def __init__(self, hard_limit=2000):\n self.hard_limit = hard_limit", "def MAXMEM(self):", "def time_limit(self):\n return 2503", "def adb_video_limit(given_limit):\n return given_limit", "def set_limit_max():\n limit_max = request.params.get(\"limit_max\", 0, type=float)\n output = request.params.get(\"output\", 1, type=int)\n retval = RP_LIB.rp_LimitMax(output, ctypes.c_float(limit_max))\n if retval != 0:\n LOG.error(\"Failed to set maximum output voltage. 
Error code: %s\", ERROR_CODES[retval])", "def test_exceeded_limit(self):\n msg=self.sample_data(\"error_exceeded_limit.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, PatronLoanLimitReached)\n eq_(u'Patron cannot loan more than 12 documents', error.message)", "def adaptive_limit(self) -> int:\n return pulumi.get(self, \"adaptive_limit\")", "def quick_run_limit(self):\n try:\n return int(environment.get(\"Quick\"))\n except KeyError:\n return maxsize", "def __check_memory_limit(self, efile_path):\n try:\n log.debug('Checking %s for exceeded memory message from SLURM', efile_path)\n with open(efile_path) as f:\n if os.path.getsize(efile_path) > 2048:\n f.seek(-2048, os.SEEK_END)\n f.readline()\n for line in f.readlines():\n stripped_line = line.strip()\n if stripped_line == SLURM_MEMORY_LIMIT_EXCEEDED_MSG:\n return OUT_OF_MEMORY_MSG\n elif any(_ in stripped_line for _ in SLURM_MEMORY_LIMIT_EXCEEDED_PARTIAL_WARNINGS):\n return PROBABLY_OUT_OF_MEMORY_MSG\n except Exception:\n log.exception('Error reading end of %s:', efile_path)\n\n return False", "def get_request_velocity_threshold(self):\n try:\n f = open('resources/max_request_velocity.txt', 'r')\n max_request_velocity = float(f.readline())\n max_request_velocity *= 1.20\n except IOError:\n max_request_velocity = 10\n print(\"No max_request_velocity.txt file found download rate set at %s, you should run ddosw_baseline first!\"\n % max_request_velocity)\n return max_request_velocity", "def velocity_limit(self):\n return self._read(MX_VELOCITY_LIMIT)", "def max_mireds(self):\n return 333", "def get_max_readings( self ):\n return 2500", "def __init__(self, max_iterations=200):\n self.max_iterations = max_iterations\n self.w = None\n self.out = None", "def _determine_limit(self, limit):\n\n # Note: +1 is allowed here because it allows\n # the user to fetch one beyond to see if they\n # are at the end of the list\n if not limit:\n res = conf.api_configuration.max_returned_num + 1\n else:\n res = min(conf.api_configuration.max_returned_num + 1, limit)\n\n return res", "def limit(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"limit\")", "def limit(self, limit):\n raise NotImplementedError(\"This should have been implemented.\")", "def write_max_splits(io_stream):\n io_stream.write('value max_splits\\n1\\n')", "def readOptimizationResultsFile(self):\n requiredLineNo = 0\n self.createParamters()\n \n self.optimizationResultsFile = open(self.fileName, 'r')\n \n for lineIndex, line in enumerate(self.optimizationResultsFile):\n if lineIndex == 0:\n startingPhase1, startingPhase2 = line.split()\n self.startingPhase1, self.startingPhase2 = int(startingPhase1), int(startingPhase2)\n\n elif lineIndex == 1:\n init1, init2, elapsedGreen1, elapsedGreen2 = line.split()\n self.init1, self.init2 = float(init1), float(init2)\n\n elif lineIndex == 2:\n self.processPhaseDuration(line, self.leftCriticalPointsRing1, self.leftCriticalPointsRing2)\n self.processPhaseSequence()\n\n elif lineIndex == 3:\n self.processPhaseDuration(line, self.leftCriticalPointsRing1, self.leftCriticalPointsRing2)\n self.processPhaseSequence()\n\n elif lineIndex == 4:\n self.processPhaseDuration(line, self.leftCriticalPointsRing1, self.leftCriticalPointsRing2)\n self.processPhaseSequence()\n\n elif lineIndex == 5:\n self.processPhaseDuration(line, self.rightCriticalPointsRing1, self.rightCriticalPointsRing2)\n\n elif lineIndex == 6:\n self.processPhaseDuration(line, self.rightCriticalPointsRing1, self.rightCriticalPointsRing2)\n\n elif lineIndex == 
7:\n self.processPhaseDuration(line, self.rightCriticalPointsRing1, self.rightCriticalPointsRing2)\n\n elif lineIndex == 14:\n noOfRequest = int(line)\n requiredLineNo = 15 + noOfRequest\n # break\n \n elif lineIndex >=15 and lineIndex < requiredLineNo:\n self.getPriorityRequests(line)\n \n elif lineIndex >=15 and lineIndex >= requiredLineNo:\n break\n # self.optimizationResultsFile = open(self.fileName, 'r')\n # for i, line in enumerate(self.optimizationResultsFile):\n # if i in range(15, requiredLineNo):\n\n self.optimizationResultsFile.close()\n # self.getPriorityRequests(requiredLineNo)\n \n self.getCummulativeValues()\n self.generateTimePhaseDiagram()", "def checkmaxmin(droi):\n \n # Loads data.\n fd = load_frames('gofparams')\n rawdata = loadresultsfile('results1.txt', 'Mean1')\n \n pardir = cmn.makepardir_data()\n movie = os.path.basename(os.path.abspath('.'))\n \n d = {}\n rois = droi.keys()\n \n # Finds the max and min points using the parameters specified in the list droi.\n for roi in rois:\n\n maxsurr, maxwinlen, maxtrshift, minsurr, minwinlen, mintrshift = droi[roi]\n \n dmax, dmin = peaklib.maxminanalysis(rawdata, maxsurr, maxwinlen, maxtrshift, minsurr,\n minwinlen, mintrshift)\n \n d[roi+'_dmax'] = dmax\n d[roi+'_dmin'] = dmin\n\n # Plots the raw traces with the max and min points indicated.\n for roi in rois:\n plt.figure(figsize=(14,10))\n peaklib.plotminmax(d[roi+'_dmax'], d[roi+'_dmin'], 'b', 1, 0)\n \n figname = movie+'_'+roi\n plt.title('{0} \\n {1} \\n frames = {2}-{3} ({4} total)'.format(figname, fd['condition'], \n fd['f1'], fd['f_end'], fd['f_end']-fd['f1']))\n \n figpath = peaklib.makefilepath(pardir, ANALYSISFOLDPNG, figname)\n plt.savefig(figpath)\n plt.close()\n \n # Writes the min/max data into a file with the function writei.\n \n ifilefold = ANALYSISFOLDTXT + '/' + movie + '/'\n ipath = peaklib.makesubdir(pardir, ifilefold)\n peaklib.writei(d, fd, ipath)", "def main():\n print(\"Running the progam template.\")\n\n filename = input(\"What is the File name?: \")\n f = open(filename, 'r')\n theSum = 0\n count = 0\n maximum = 0\n minimum = 9999999999999999999\n for line in f:\n count += 1\n lines = line.strip()\n number = int(line)\n theSum += number\n average = theSum / count\n if number > maximum:\n maximum = number\n if number < minimum:\n minimum = number\n\n print(\"The Number of Integers is:\",count) \n print(\"The Average is:\",average)\n print (\"The Maximum is:\",maximum)\n print(\"The Minimum is:\",minimum)\n f.close()\n print(\"Have a nice day!\")", "def loadMaxIPlist(self, filename):\r\n #I need to put this in a try/catch block later \r\n \r\n maxIPlist=10\r\n linecount=0 \r\n iplist=[]\r\n with open(filename, 'r') as infile:\r\n element = infile.readline()\r\n while element:\r\n \r\n linecount +=1\r\n if linecount < maxIPlist:\r\n iplist.append(element)\r\n element = infile.readline()\r\n \r\n self.objdict['IPADDRESS']=iplist\r\n print(\"Loaded \", linecount, \" ip addresses\")\r\n\r\n return(linecount)", "def findmax(h5file, pcoord_dim, fi, li):\n max_values = []\n for i in range(fi,li+1):\n i = str(i)\n iteration = \"iter_\" + str(numpy.char.zfill(i,8))\n pc = h5file['iterations'][iteration]['pcoord']\n maxv = numpy.max(pc[:,-1,pcoord_dim-1])\n max_values.append(maxv)\n maxmax = numpy.max(max_values)\n nw = numpy.where(max_values>(maxmax-maxmax*0.0001))\n iter_num = str((nw[0]+1)[0])\n \n wheretolook = \"iter_\" + str(numpy.char.zfill(iter_num,8))\n max_iter = h5file['iterations'][wheretolook]['pcoord'][:,-1,pcoord_dim-1]\n segmax = 
numpy.max(max_iter)\n nw2 = numpy.where(max_iter>(segmax-segmax*0.0001))\n seg_num = (nw2[0])[0]\n print (\"Maximum pcoord value for dimension\",pcoord_dim,\"is:\",segmax) \n print (\"It is segment:\",seg_num,\"of iteration:\",iter_num)", "def determinePlotLimits(self):\n max_str = \"up99\"\n min_str = \"dn99\"\n if self.keywords.get(\"limit_type\",\"99per\") == \"minmax\":\n max_str = \"max\"\n min_str = \"min\"\n \n # Determine the min/max of variables over all models\n limits = {}\n prune = False\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n if \"MeanState\" not in dataset.groups: continue\n group = dataset.groups[\"MeanState\"]\n variables = [v for v in group.variables.keys() if v not in group.dimensions.keys()]\n for vname in variables:\n var = group.variables[vname]\n pname = vname.split(\"_\")[0]\n region = vname.split(\"_\")[-1]\n if var[...].size <= 1: continue\n if space_opts.has_key(pname):\n if not limits.has_key(pname):\n limits[pname] = {}\n limits[pname][\"min\"] = +1e20\n limits[pname][\"max\"] = -1e20\n limits[pname][\"unit\"] = post.UnitStringToMatplotlib(var.getncattr(\"units\"))\n limits[pname][\"min\"] = min(limits[pname][\"min\"],var.getncattr(min_str))\n limits[pname][\"max\"] = max(limits[pname][\"max\"],var.getncattr(max_str))\n elif time_opts.has_key(pname):\n if not limits.has_key(pname): limits[pname] = {}\n if not limits[pname].has_key(region):\n limits[pname][region] = {}\n limits[pname][region][\"min\"] = +1e20\n limits[pname][region][\"max\"] = -1e20\n limits[pname][region][\"unit\"] = post.UnitStringToMatplotlib(var.getncattr(\"units\"))\n limits[pname][region][\"min\"] = min(limits[pname][region][\"min\"],var.getncattr(\"min\"))\n limits[pname][region][\"max\"] = max(limits[pname][region][\"max\"],var.getncattr(\"max\"))\n if not prune and \"Benchmark\" in fname and pname == \"timeint\":\n prune = True\n self.pruneRegions(Variable(filename = fname,\n variable_name = vname,\n groupname = \"MeanState\"))\n \n # Second pass to plot legends (FIX: only for master?)\n for pname in limits.keys():\n\n try:\n opts = space_opts[pname]\n except:\n continue\n \n # Determine plot limits and colormap\n if opts[\"sym\"]:\n vabs = max(abs(limits[pname][\"min\"]),abs(limits[pname][\"min\"]))\n limits[pname][\"min\"] = -vabs\n limits[pname][\"max\"] = vabs\n\n # if a score, force to be [0,1]\n if \"score\" in pname:\n limits[pname][\"min\"] = 0\n limits[pname][\"max\"] = 1\n\n limits[pname][\"cmap\"] = opts[\"cmap\"]\n if limits[pname][\"cmap\"] == \"choose\": limits[pname][\"cmap\"] = self.cmap\n\n # Plot a legend for each key\n if opts[\"haslegend\"]:\n fig,ax = plt.subplots(figsize=(6.8,1.0),tight_layout=True)\n label = opts[\"label\"]\n if label == \"unit\": label = limits[pname][\"unit\"]\n post.ColorBar(ax,\n vmin = limits[pname][\"min\"],\n vmax = limits[pname][\"max\"],\n cmap = limits[pname][\"cmap\"],\n ticks = opts[\"ticks\"],\n ticklabels = opts[\"ticklabels\"],\n label = label)\n fig.savefig(os.path.join(self.output_path,\"legend_%s.png\" % (pname))) \n plt.close()\n\n # Determine min/max of relationship variables\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n for g in dataset.groups.keys():\n if \"relationship\" not in g: continue\n grp = dataset.groups[g]\n if not limits.has_key(g):\n limits[g] = {}\n limits[g][\"xmin\"] = +1e20\n limits[g][\"xmax\"] = -1e20\n limits[g][\"ymin\"] = +1e20\n limits[g][\"ymax\"] = -1e20\n limits[g][\"xmin\"] = 
min(limits[g][\"xmin\"],grp.variables[\"ind_bnd\"][ 0, 0])\n limits[g][\"xmax\"] = max(limits[g][\"xmax\"],grp.variables[\"ind_bnd\"][-1,-1])\n limits[g][\"ymin\"] = min(limits[g][\"ymin\"],grp.variables[\"dep_bnd\"][ 0, 0])\n limits[g][\"ymax\"] = max(limits[g][\"ymax\"],grp.variables[\"dep_bnd\"][-1,-1])\n\n \n self.limits = limits", "def upperLimit(self):\r\n if len(self.elements) > 0:\r\n return self.elements[-1].upperLimit\r\n else:\r\n raise KeyError('No elements in Computation object')", "def maxtimes(self):\n vas = []\n file = self.read1()\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n vas.append(s_i)\n count = 0\n num = vas[0]\n for i in vas:\n freq = vas.count(i)\n if (freq > count): #pylint: disable = superfluous-parens\n count = freq\n num = i\n vas1 = []\n vas1.append(num)\n self.print(vas1)\n self.write(vas1)\n logging.debug(\"Starting with to\")\n return vas1", "def get_lmax_limit(self):\n\n if self.pixel == \"HEALPIX\":\n l_max_limit = 3 * self.nside - 1\n elif self.pixel == \"CAR\":\n cdelt = self.data.wcs.wcs.cdelt[1]\n l_max_limit = 360 / cdelt / 4\n return l_max_limit", "def get_support_max_limit(item, counts):\n return int(max(counts[item] / MIN_ALL_CONF, MIN_SUPPORT))", "def max_positions(self):\n return int(100000.0)", "def velocity_limit(self, value):\n self._write(MX_VELOCITY_LIMIT, value)", "def limit():\n bwc = BandwidthConfigurator()\n bwc.limit()", "def max_temp(self):\n return 99", "def read_current_limit(self):\n function_string = 'I' + self.output + '?'\n value_string = self.scpi_comm(function_string)\n try:\n value = float(value_string.replace('I' + self.output, ''))\n except ValueError:\n value = -999999\n return value", "def _get_length_max(self):\n # data_list = list(range(len(self.files_refined)))\n data_list = range(self.idx_max_length, len(self.files_refined))\n progress = tqdm(data_list)\n for pdb_id in progress:\n features_filt, geo_filt = self._get_features_geo_filtered(pdb_id)\n length = features_filt.shape[1]\n if (length > self.max_length):\n self.max_length = length\n progress.set_postfix({'pdb': self.files_refined[pdb_id],\n 'length': length,\n 'max_langth': self.max_length})\n save_checkpoint_feature(self.path_checkpoint_features, pdb_id, self.max_length, id)\n return self.max_length", "def max_position_limit(self):\n return self._read(MX_MAX_POSITION_LIMIT)", "def __init__(self):\n self.data = []\n self.min = sys.maxsize", "def max_output_buffer(self, i: \"int\") -> \"long\":\n return _beamforming_swig.doaesprit_sptr_max_output_buffer(self, i)", "def set_max_noutput_items(self, m: \"int\") -> \"void\":\n return _beamforming_swig.doaesprit_sptr_set_max_noutput_items(self, m)", "def __init__(self, otu_path, min, max, step, num_reps):\r\n self.rare_depths = range(min, max + 1, step)\r\n self.num_reps = num_reps\r\n #self.otu_table = parse_biom_table(open(otu_path,'U'))\r\n self.otu_table = self.getBiomData(otu_path)\r\n self.max_num_taxa = -1\r\n tmp = -1\r\n for val in self.otu_table.iterObservationData():\r\n if val.sum() > tmp:\r\n tmp = val.sum()\r\n self.max_num_taxa = tmp", "def abs_max_cool_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_max_cool_setpoint_limit\", 3200)", "def test_deploy_more_vms_than_limit_allows(self):\n self.test_limits(vm_limit=2)", "def read_file_agsm(self,filename):\n\n narr,larr,farr,iarr,nn,exceed_freqlim = \\\n aims_fortran.read_file_agsm(filename,config.npositive,config.agsm_cutoff, \\\n config.cutoff*self.cutoff)\n 
self.modes = np.array(zip(narr[0:nn],larr[0:nn],farr[0:nn],iarr[0:nn]),dtype=modetype)\n\n return exceed_freqlim", "def _limit_helper(stream: Union[BinaryIO, Generator, List], limit: int) -> Generator:\n for value in stream:\n yield value\n if limit == 1:\n return\n else:\n limit = limit - 1 # FIXME", "def _maximum(self) -> float:\n return self._config[CONF_MAX]", "def find_time_limits(files):\n tmin=0\n tmax=2**60\n for f in files:\n arr=np.load(f, mmap_mode=\"r\")[:,1]\n tmin=max(tmin, np.min(arr))\n tmax=min(tmax, np.max(arr))\n return tmin,tmax", "def __init__(self, train_para_limit, train_ques_limit, dev_para_limit, dev_ques_limit,\n ans_limit, char_limit, emb_file_name, num_workers=None, save_load_data=False,\n data_root_path='./data'):\n self._train_para_limit = train_para_limit\n self._train_ques_limit = train_ques_limit\n self._dev_para_limit = dev_para_limit\n self._dev_ques_limit = dev_ques_limit\n self._ans_limit = ans_limit\n self._char_limit = char_limit\n self._emb_file_name = emb_file_name\n self._is_cased_embedding = emb_file_name.startswith('glove.840')\n self._num_workers = num_workers\n self._save_load_data = save_load_data\n self._data_root_path = data_root_path\n\n self._processed_train_data_file_name = 'train_processed.json'\n self._processed_dev_data_file_name = 'dev_processed.json'\n self._word_vocab_file_name = 'word_vocab.bin'\n self._char_vocab_file_name = 'char_vocab.bin'", "def clicked_btn_find_upper_limit(self):\n spectral_model, proxy_index, index = self._get_selected_model(True)\n # Find the upper limit \n try:\n sigma = round(float(self.edit_ul_sigma.text()),1)\n except:\n logger.debug(\"Invalid sigma for finding limit\")\n return None\n upper_limit = spectral_model.find_upper_limit(sigma=sigma, start_at_current=True)\n # Refresh GUI\n self.measurement_view.update_row(proxy_index.row())\n self.summarize_current_table()\n self.update_fitting_options()\n self.refresh_plots()\n return None", "def size_limit(self):\n\t\treturn self._size_limit", "def calculate(self, limit):\r\n pass", "def execMaxpTabu(y, w, threshold=100.0, maxit=2, tabuLength=5, typeTabu=\"exact\"):\n print(\"Running max-p-regions model (Duque, Anselin and Rey, 2010)\")\n print(\"Local search method: Tabu Search\")\n print(\"Number of areas: \", len(y))\n print(\"threshold value: \", threshold)\n distanceType = \"EuclideanSquared\"\n distanceStat = \"Centroid\";\n objectiveFunctionType = \"SS\";\n selectionType = \"Minimum\";\n numRegionsType = \"EndogenousThreshold\";\n\n # CONSTRUCTION PHASE 1: GROWING FEASIBLE REGIONS\n\n start = tm.time()\n\n # print w\n # print y\n\n am = AreaManager(w, y, distanceType)\n maxP = 0\n bestCandidates = {}\n for i in range(maxit):\n\n # print \"**** Iteration %d of %d ...\"%(i+1,maxit)\n\n rm = RegionMaker(am,\n distanceType = distanceType,\n distanceStat = distanceStat,\n selectionType = selectionType,\n objectiveFunctionType = objectiveFunctionType,\n numRegionsType = numRegionsType,\n threshold = threshold)\n numRegions = len(rm.feasibleRegions)\n rm.getObj()\n\n # print \"rm.feasibleRegions\",rm.feasibleRegions\n # print \"obj\",rm.getObj()\n\n if numRegions > maxP:\n bestCandidates = {}\n maxP = numRegions\n obj = rm.objInfo\n bestCandidates[obj] = rm.feasibleRegions\n if numRegions == maxP:\n obj = rm.objInfo\n if obj in bestCandidates:\n pass\n else:\n bestCandidates[obj] = rm.feasibleRegions\n else:\n pass\n\n # print \"bestCandidates\", bestCandidates\n\n ofValues = list(bestCandidates.keys())\n basicMemory = BasicMemory()\n while len(ofValues) >= 
1:\n\n # RECREATE SOLUTION\n\n rm.resetNow()\n minOfValue = min(ofValues)\n ofValues.remove(minOfValue)\n partialSolution = bestCandidates[minOfValue]\n\n # print \"ASSIGNING ENCLAVES\"\n # print partialSolution\n\n regionId = 0\n for growReg in partialSolution:\n seedGrowReg = partialSolution[growReg][0]\n rm.assignSeeds(seedGrowReg, regionId)\n partialSolution[growReg].remove(seedGrowReg)\n if len(partialSolution[growReg]) >= 1:\n for areaInGrow in partialSolution[growReg]:\n rm.assignArea(areaInGrow, regionId)\n regionId += 1\n\n # CONSTRUCTION PHASE 2: ENCLAVES ASSIGNATION\n\n rm.feasibleRegions = copy.deepcopy(rm.region2Area)\n rm.getIntraBorderingAreas()\n rm.newExternal = set(rm.unassignedAreas)\n if len(rm.unassignedAreas) != 0:\n rm.constructionStage = \"enclaves\"\n while len(rm.unassignedAreas) != 0:\n rm.constructRegions()\n rm.objInfo = rm.getObjective(rm.region2Area)\n rm.feasibleRegions = copy.deepcopy(rm.region2Area)\n rm.getIntraBorderingAreas()\n\n # print \"ASSIGNED SOLUTION\"\n # print \"OBJ: \", rm.getObjective(rm.region2Area), rm.returnRegions()\n\n rm.calculateRegionValueThreshold()\n\n # LOCAL SEARCH\n\n rm.calcObj()\n convTabu = min(10,old_div(len(y),maxP)) # convTabu=230*numpy.sqrt(maxP)\n\n # print \"###ENTERING TABU\",rm.objInfo,rm.returnRegions()\n\n rm.tabuMove(tabuLength, convTabu = convTabu, typeTabu=typeTabu)\n rm.calcObj()\n\n # print \"***** AFTER TABU\",rm.objInfo,rm.returnRegions()\n # EVALUATE SOLUTION\n\n if rm.objInfo < basicMemory.objInfo:\n basicMemory.updateBasicMemory(rm)\n time = tm.time() - start\n Sol = basicMemory.regions\n Of = basicMemory.objInfo\n print(\"FINAL SOLUTION: \", Sol)\n print(\"FINAL OF: \", Of)\n output = { \"objectiveFunction\": Of,\n \"runningTime\": time,\n \"algorithm\": \"maxpTabu\",\n \"regions\": len(Sol),\n \"r2a\": Sol,\n \"distanceType\": distanceType,\n \"distanceStat\": distanceStat,\n \"selectionType\": selectionType,\n \"ObjectiveFuncionType\": objectiveFunctionType}\n print(\"Done\")\n return output", "def max_voltage_limit(self):\n return self._read(MX_MAX_VOLTAGE_LIMIT)", "def max_output_buffer(self, i):\n return _spacegrant_swig.G3RUH_descramble_sptr_max_output_buffer(self, i)", "def max_temp(self):\n return 30", "def minsize(self):# -> int:\r\n return 0", "def one_batch_test_int_solver(prevalence_rate,typeII_error, typeI_error,batch_limit,n_initial_guess = 2):\n\n \n sol_float = one_batch_test_solver(prevalence_rate,typeII_error, typeI_error, n_initial_guess)\n floor, ceil = np.floor(sol_float), np.ceil(sol_float)\n func = lambda batch_size: 1/batch_size + 1 - typeII_error -(1 - typeII_error - typeI_error)*(1-prevalence_rate)**batch_size\n if func(floor) < func(ceil):\n temp = int(floor)\n else:\n temp = int(ceil)\n if temp <= batch_limit:\n return temp\n else:\n return int(batch_limit)", "def max_output_buffer(self, i: \"int\") -> \"long\":\n return _beamforming_swig.beamformer_sptr_max_output_buffer(self, i)", "def __len__(self):\n if not hasattr(self.limitedstream, \"limit\"):\n return 0\n return self.limitedstream.limit", "def pwm_limit(self):\n return self._read(MX_PWM_LIMIT)", "def brute(limit):\n c_lengths = {s: collatz_length(s) for s in range(1, limit+1)}\n return max(c_lengths, key=lambda x: c_lengths[x])", "def set_max_noutput_items(self, m: \"int\") -> \"void\":\n return _beamforming_swig.beamformer_sptr_set_max_noutput_items(self, m)", "def _set_max_steps(self, mx):\n self._max = max(0, mx)\n\n if self._max:\n self._step_width = Helper.len(str(self._max))\n else:\n self._step_width = 4", "def 
find_max_bin(self):\n x = self.local['clip']\n midrange = x[int(len(x)*0.2):int(len(x)*.2 + int(len(x)*.5))]\n self.max_bin = max(midrange)", "def findMaxFactor(self):\n factorMax = 0\n factorMaxInd = ''\n for ue in list(self.ues.keys()):\n if len(self.ues[ue].bearers[0].buffer.pckts)>0 and self.ues[ue].pfFactor>factorMax:\n factorMax = self.ues[ue].pfFactor\n factorMaxInd = ue\n if factorMaxInd=='':\n ue = list(self.ues.keys())[self.ind_u]\n q = 0\n while len(self.ues[ue].bearers[0].buffer.pckts)==0 and q<len(self.ues):\n self.updIndUE()\n ue = list(self.ues.keys())[self.ind_u]\n q = q + 1\n factorMaxInd = ue\n\n return factorMaxInd", "def __init__(self):\n self.min = sys.maxsize\n self.stk = deque()", "def onSkipSegLimit(self):\r\n profprint()\r\n #research\r\n logic = self.logic\r\n logic.placeAxialLimitMarker(assign=False)", "def _process_threadpool_limits_initializier():\n import numpy # required for loky's autodetection\n from threadpoolctl import threadpool_limits\n\n threadpool_limits(limits=1)", "def setOutMax(self, out_max):\n\t\tself.out_max = out_max", "def get_max_run(run):\n max = 0\n max_i = 0\n for i in range(800, 900):\n if int(run[i]) > int(max):\n max = run[i]\n max_i = i\n return max, max_i", "def test_large_flmb(self):\n test_files_218 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-218*.mdd')\n\n mdd.procall(test_files_218)\n\n data_orig = self.read_full_file('node59p1.dat')\n\n # two status files from different controllers, 12371 and 12365\n data_out_71 = self.read_full_file('node59p1_0.status_1237101.dat')\n self.check_sio_type(data_out_71, ['CS', 'PS'])\n data_out_65 = self.read_full_file('node59p1_0.status_1236501.dat')\n self.check_sio_type(data_out_65, ['CS', 'PS'])\n data_out = data_out_71\n data_out += data_out_65\n\n data_adcps = self.read_full_file('node59p1_0.adcps_1237111.dat')\n self.check_sio_type(data_adcps, ['AD'])\n data_out += data_adcps\n\n data_ctdmo = self.read_full_file('node59p1_0.ctdmo_1237100.dat')\n self.check_sio_type(data_ctdmo, ['CT', 'CO'])\n data_out += data_ctdmo\n\n data_dosta = self.read_full_file('node59p1_0.dosta_1236501.dat')\n self.check_sio_type(data_dosta, ['DO'])\n data_out += data_dosta\n\n data_flort = self.read_full_file('node59p1_0.flort_1236501.dat')\n self.check_sio_type(data_flort, ['FL'])\n data_out += data_flort\n\n data_phsen = self.read_full_file('node59p1_0.phsen_1236501.dat')\n self.check_sio_type(data_phsen, ['PH'])\n data_out += data_phsen\n\n if not TestSioUnpack.compare_sio_matches(data_orig, data_out):\n self.fail(\"Failed sio block compare\")\n\n test_files = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-205*.mdd')\n test_files_217 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-217*.mdd')\n test_files_219 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-219*.mdd')\n\n test_files.extend(test_files_217)\n test_files.extend(test_files_219)\n\n mdd.procall(test_files)\n\n data_out = self.compare_node59(1, data_out)\n\n test_files = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-233*.mdd')\n test_files_231 = glob.glob(INPUT_FLMB_PATH + '/unit_363-2013-231*.mdd')\n\n test_files.extend(test_files_231)\n\n mdd.procall(test_files)\n\n self.compare_node59(2, data_out)", "def read_values_from_file(input_file, debug = True):\n with open(input_file, 'r') as infile:\n #Read the data from the file\n input_data = infile.readline()\n\n #And extract the required parameters\n n = int(input_data.split()[0]) #number of months to run experiment\n m = int(input_data.split()[1]) #maximum age of rabbits\n \n if debug:\n print(\"From file %s I 
have read the following data:\" % input_file)\n print(\"n = %s\\tm = %s\\n\" % (n, m))\n \n return(n, m)", "def getSPL(self):\n\n fname = os.path.join(self.run_dir,self.run_name + '.spl')\n if (not os.path.exists(fname)):\n sys.stderr.write ('getMaxSPL: {:} does not exist\\n'.format(fname))\n return None\n\n spl = myloadtxt(fname,skiprows=9)\n maxspl = np.amax(spl[:,2:])\n return maxspl", "def getMaxMancount(self):\n return self.__size * 20", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.G3RUH_descramble_sptr_set_max_noutput_items(self, m)", "def storage_upper_bound(index):\n i = index[0]\n return storage_para[i].pmax", "def __init__(self, num_beams, max_length, length_penalty, early_stopping):\n self.max_length = max_length - 1 # ignoring bos_token\n self.length_penalty = length_penalty\n self.early_stopping = early_stopping\n self.num_beams = num_beams\n self.beams = []\n self.worst_score = 1e9", "def cmgProcessFile(f, offsetRecs=0, numRecs=None, elementsPerRec=4):\n uStart,joiner,uStop = fileTimeRange(f)\n print offsetRecs, numRecs, elementsPerRec\n data = readPadFile(f, offsetRecs=offsetRecs, numRecs=numRecs, elementsPerRec=elementsPerRec)\n\n print '(%s) %.3f ' % (split(f,os.path.sep)[-1], uStart),\n iMax = getMaxAbs(data)\n\n## zugmax = (10.0**6)*(data[iMax[-1],-1]) # for ossbtmf\n## print '%7.1f @ %d' % (zugmax,iMax[-1])\n\n zugmax = (10.0**6)*(data[iMax[-3],-3]) # for ossraw\n print '%7.1f @ %d' % (zugmax,iMax[-3])", "def _update_size(self, f_raw):\n seat_flag = False\n for line in f_raw.splitlines():\n line = process_str(line)\n if line == \"seats\" or line == \"seat\":\n seat_flag = True\n elif line == \"specifiers\" or line == \"specifier\":\n seat_flag = False\n elif seat_flag:\n # Skip if empty line\n if line == \"\":\n continue\n\n # Otherwise check if seat range is new max\n for seat_range in line.split(','):\n # Skip if empty range\n if seat_range == '':\n continue\n\n cur_row, cur_col = seat_inds(seat_range.split('-')[-1])\n\n if cur_row > self.max_row:\n self.max_row = cur_row\n if cur_col > self.max_col:\n self.max_col = cur_col", "def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def ConstrRank():\n with open(path.join(MAIN_PATH, RANK)) as f:\n ranked_data = []\n for line in f:\n ranked_data.append(line.strip().split()[0]) \n\n threshold = 5000\n global rank_less\n global rank_more\n rank_less = ranked_data[:threshold]\n rank_more = ranked_data[threshold:]\n\n with open(path.join(MAIN_PATH, INST)) as f:\n for line in f:\n line = line.strip().split(\",\")\n exists.append(line[0:2])", "def max_pp(level):\n base_pp = 6\n level_pp = 2 * level\n return base_pp + (level_pp - 2)", "def rdmb(vs, mbs, out_file_base, prms, max_ite=2000):\n\n nei, dnei, fvnei, fv, dnei6mean, dnei6std = get_nei_force(vs, mbs)\n\n dneiN = dnei / dnei6mean\n\n n = len(vs)\n max_nei = len(nei[0])\n\n er = mbs.effective_radius(n)\n\n A, B, C, D, E, F, G = prms\n\n synUmax = 0.23\n synVmax = 0.50\n ucmax = 6.0\n\n dt = 1.0e-2\n\n Du = 0.5\n Dv = 10.0\n\n # RR = 30\n RR = 80\n\n u = np.random.rand(n) * ucmax\n v = np.random.rand(n) * ucmax\n\n rea_u = np.zeros(n)\n rea_v = np.zeros(n)\n syn_u = np.zeros(n)\n syn_v = np.zeros(n)\n\n Ru = Du / (dneiN**2)\n Rv = Dv / (dneiN**2)\n\n Ru[dnei > er] = 0\n Rv[dnei > er] 
= 0\n\n save_rd_prms(out_file_base+\"_prms.npz\",\n vs, mbs,\n A, B, C, D, E, F, G,\n synUmax, synVmax, ucmax, dt, Du, Dv, RR)\n save_rd_uv(out_file_base+\"_{:05}.npz\".format(0), u, v)\n\n for ite in range(max_ite):\n syn_u = A * u - B * v + C\n syn_v = E * u - F\n\n syn_u[syn_u < 0] = 0\n syn_u[syn_u > synUmax] = synUmax\n syn_v[syn_v < 0] = 0\n syn_v[syn_v > synVmax] = synVmax\n\n rea_u = syn_u - D * u\n rea_v = syn_v - G * v\n\n uu = (Ru * (u[nei] - np.einsum('i,j->ij', u,\n np.ones(max_nei)))).sum(axis=1)\n vv = (Rv * (v[nei] - np.einsum('i,j->ij', v,\n np.ones(max_nei)))).sum(axis=1)\n\n u += (RR * rea_u + uu) * dt\n v += (RR * rea_v + vv) * dt\n\n if ((ite+1) % 500) == 0:\n print(\"[ite: {}]\".format(ite+1))\n\n fname = out_file_base + \"_{:05}.npz\".format(max_ite)\n save_rd_uv(fname, u, v)\n\n return True", "def parse_sequence_lengths(filepath, base_pair_limit):\n\n total_count = 0\n limit_count = 0\n with open(filepath) as f:\n line = f.readline()\n while line:\n if line.startswith('@'):\n total_count += 1\n seq = f.readline()\n sep = f.readline()\n qual = f.readline()\n if len(seq.strip()) > base_pair_limit:\n limit_count += 1\n line = f.readline()\n\n return limit_count / total_count", "def get_max_mid_diameter(self):\n max_min_mid_diam = 0\n\n for m in self.components:\n name = m.name\n diam_file = join(\n self.params['molec_dir'],\n name+'_size.csv'\n )\n\n if exists(diam_file.replace('.csv', '.TOOBIG')):\n max_min_mid_diam = 0\n print(f'{m.name} too big based on MW')\n break\n if exists(diam_file.replace(\n 'size.csv',\n 'unopt.ETKDGFAILED'\n )):\n max_min_mid_diam = 0\n print(f'{m.name} failed ETKDG')\n break\n results = pd.read_csv(diam_file)\n min_mid_diam = min(results['diam2'])\n max_min_mid_diam = max([min_mid_diam, max_min_mid_diam])\n\n self.max_min_mid_diam = max_min_mid_diam", "def findmaxmin(input_file):\n\tE_list = sub.check_output(\"check_maxmin.py {}\".format(input_file), shell=True).decode(\"utf-8\")\n\tEmax = float(re.search(r\"Maximum\\s*:\\s*(([+-]|\\s)\\d*\\.\\d+)\", E_list).group(1))\n\tEmin = float(re.search(r\"Minimum\\s*:\\s*(([+-]|\\s)\\d*\\.\\d+)\", E_list).group(1))\n\treturn Emax, Emin", "def check_limit(limit_value):\n try:\n limit = int(limit_value)\n except ValueError:\n raise SystemExit('The argument \"limit\" should be a positive number')\n else:\n if limit < 1:\n raise SystemExit('The argument \"limit\" should be greater than 0')\n else:\n return limit", "def __init__(self, max_temp, cool_rate, eval_limit, benchmark):\n super().__init__(size=1,eval_limit=eval_limit, benchmark=benchmark)\n self._max_temp = max_temp\n self._cool_rate = cool_rate\n self._temp = None\n self._step = 0", "def upper_limit(\n data, model, scan=None, level=0.05, return_results=False, **hypotest_kwargs\n):\n if scan is not None:\n return linear_grid_scan(\n data, model, scan, level, return_results, **hypotest_kwargs\n )\n # else:\n bounds = model.config.suggested_bounds()[\n model.config.par_slice(model.config.poi_name).start\n ]\n obs_limit, exp_limit, results = toms748_scan(\n data,\n model,\n bounds[0],\n bounds[1],\n from_upper_limit_fn=True,\n **hypotest_kwargs,\n )\n if return_results:\n return obs_limit, exp_limit, results\n return obs_limit, exp_limit", "def __init__(self, max_num_of_rounds_to_retain=100, num_of_last_check_rounds_consider=2):\n self.data = list()\n self.max_num_of_rounds_to_retain = max_num_of_rounds_to_retain\n self.num_of_last_check_rounds_consider = num_of_last_check_rounds_consider", "def maximize(self, budget, optimizer):\n\n\t\tpass", 
"def set_progress_range(self, maximum):\r\n\r\n pass", "def test_Max_Iteration_Negative(self):\n\t\tself.assertRaises(calc.OutOfRangeError, calc.it, M([[1 + 1j]]), 0 + 0j, -10)", "def export_results_max():\n\n # initialise the list of generated files\n gen_files = []\n\n ######\n # Define allowed variable names and associated equations to generate values.\n ######\n # Note that mannings n (friction value) is taken as 0.01, as in the model\n # run density of water is 1000\n var_equations = {'stage': enm.export_newstage_max,\n 'oldstage': 'stage',\n 'momentum': '(xmomentum**2 + ymomentum**2)**0.5',\n 'olddepth': 'oldstage-elevation',\n 'depth': edm.export_depthonland_max,\n 'speed': '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6)',\n 'energy': '(((xmomentum/(stage-elevation+1.e-6))**2'\n ' + (ymomentum/(stage-elevation+1.e-6))**2)'\n '*0.5*1000*(stage-elevation+1.e-6))+(9.81*stage*1000)',\n 'bed_shear_stress': ('(((1/(stage-elevation+1.e-6)**(7./3.))*1000*9.81*0.01**2*(xmomentum/(stage-elevation+1.e-6))*((xmomentum/(stage-elevation+1.e-6))**2+(ymomentum/(stage-elevation+1.e-6))**2)**0.5)**2'\n '+ ((1/(stage-elevation+1.e-6)**(7./3.))*1000*9.81*0.01**2*(ymomentum/(stage-elevation+1.e-6))*((xmomentum/(stage-elevation+1.e-6))**2+(ymomentum/(stage-elevation+1.e-6))**2)**0.5)**2)**0.5'),\n 'elevation': 'elevation'}\n\n ######\n # Start script, running through variables, area, sww file\n ######\n\n for which_var in project.layers_list:\n which_var = which_var.lower()\n log.info(\"Exporting value: %s\" % which_var)\n\n if which_var not in var_equations:\n log.critical('Unrecognized variable name: %s' % which_var)\n break\n\n project.export_area = project.export_area.lower()\n if project.export_area == 'all':\n easting_min = None\n easting_max = None\n northing_min = None\n northing_max = None\n elif project.export_area == 'aoi':\n easting_min = project.xminAOI\n easting_max = project.xmaxAOI\n northing_min = project.yminAOI\n northing_max = project.ymaxAOI\n else:\n log.critical('Unrecognized area name: %s' % project.export_area)\n break\n\n name = os.path.join(project.output_folder, project.scenario)\n\n outname = name + '_' + project.export_area + '_' + which_var\n quantityname = var_equations[which_var]\n\n log.info('Generating output file: %s' % (outname+'.asc'))\n\n # assume 'quantityname' is a string, handle in the old way,\n # else call the handler function (same params as anuga.sww2dem)\n if isinstance(quantityname, basestring):\n export_func = anuga.sww2dem\n elif callable(quantityname):\n export_func = quantityname\n\n export_func(name+'.sww', outname+'.asc', quantity=quantityname,\n reduction=max, cellsize=project.cell_size,\n easting_min=easting_min, easting_max=easting_max,\n northing_min=northing_min, northing_max=northing_max,\n verbose=False)\n\n # add generated filename to result list\n gen_files.append(outname+'.asc')\n\n return gen_files", "def test_get_remain_limit(self):\n finder = FinderInsidePro(self.test_key)\n limit = finder.get_remain_limit()\n assert isinstance(limit, int)\n assert limit > 0", "def max_position_limit(self, value):\n self._write(MX_MAX_POSITION_LIMIT, value)" ]
[ "0.6134237", "0.6107441", "0.5806846", "0.5753623", "0.56226695", "0.5602243", "0.5540518", "0.54625756", "0.5389855", "0.5387457", "0.53571594", "0.53309345", "0.53300595", "0.53155893", "0.5291036", "0.5275519", "0.52754134", "0.523921", "0.5234351", "0.52126867", "0.5208347", "0.5183009", "0.51779324", "0.51667035", "0.5162829", "0.5161661", "0.5154646", "0.5151347", "0.51507944", "0.5150383", "0.513709", "0.5132915", "0.5113824", "0.5105475", "0.51025504", "0.5093836", "0.5092494", "0.50898904", "0.5085073", "0.50713485", "0.5055327", "0.50520355", "0.5049881", "0.5049038", "0.50460905", "0.5040484", "0.5030255", "0.5023558", "0.501871", "0.5018291", "0.5014046", "0.50129116", "0.5010937", "0.50060236", "0.5004965", "0.49998698", "0.4999829", "0.49977896", "0.49901232", "0.49772263", "0.49767587", "0.49754378", "0.49717176", "0.49709645", "0.49697086", "0.4968932", "0.49648207", "0.4963076", "0.4955078", "0.49505436", "0.49499646", "0.49399164", "0.49273512", "0.49262914", "0.49258843", "0.4924117", "0.49197167", "0.4916759", "0.49166974", "0.49095348", "0.49063116", "0.49054542", "0.49052203", "0.49038386", "0.49038386", "0.48994225", "0.48968798", "0.48967654", "0.489519", "0.48943105", "0.4894133", "0.48922", "0.4892174", "0.4891346", "0.48902592", "0.4886754", "0.48825523", "0.4881032", "0.48782256", "0.48778346", "0.48757327" ]
0.0
-1
Opens and reads the parameters in the [SUBCATCHMENTS] and [SUBAREAS] headers within the SWMM input file. Adds these parameters (as strings) to a numpy array.
def read_initial_parameters(inputfilename): subc_params = [] subarea_params = [] global subc_names subc_names = [] subcatchment_parameters = [] inputfile = open(inputfilename, 'r') for line in inputfile: if(line.find("[SUBCATCHMENTS]") != -1): line = inputfile.readline() for i in range(count): templine = list(line) if templine[0] == ";" or templine[0] == " " or len(templine) < 10: line = inputfile.readline() continue elif (line.find("[") != -1): break else: linesplit = line.split() subc_params.append(linesplit[4:7]) subc_names.append(linesplit[0]) line = inputfile.readline() if (line.find("[SUBAREAS]") != -1): line = inputfile.readline() for i in range(count): templine = list(line) if templine[0] == ";" or templine[0] == " " or len(templine) < 10: line = inputfile.readline() continue elif (line.find("[") != -1): break else: linesplit = line.split() subarea_params.append(linesplit[1:6]) line = inputfile.readline() inputfile.close() #Part of the function that experiments with np array. Potentially removes the need for the list transformation # functions that chew up a lot of time. Each subcatchment has a row, each parameter type has a column. global subcatchment_parameters_np subcatchment_parameters_np = np.empty((len(subc_params[0]) + len(subarea_params[0]), len(subc_params)), dtype=float) for row in range(len(subc_params)): for col in range(len(subc_params[0])): subcatchment_parameters_np[row, col] = float(subc_params[row][col]) for row in range(len(subarea_params)): for col in range(len(subarea_params[0])): subcatchment_parameters_np[row, col + len(subc_params[0])] = float(subarea_params[row][col]) #Old string code # for i in range(len(subc_params)): # for j in range(len(subarea_params[i])): # subc_params[i].append(subarea_params[i][j]) # subcatchment_parameters.append(subc_params[i]) return(np_subcatchment_parameters)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subcatch(ini_file='subcatch.ini'):\n config.read(ini_file)\n print 'Read the file ', ini_file\n\n file_in = config.get('file_in', 'file_in')\n\n file_out = config.get('file_out', 'file_out')\n\n picture_out = config.get('picture_out', 'picture_out')\n\n Xoutlet = config.getfloat('coord_outlet', 'Xoutlet')\n Youtlet = config.getfloat('coord_outlet', 'Youtlet')\n\n nb_param = config.getfloat('flags', 'nb_param')\n X = config.getfloat('flags', 'X')\n\n #Reading of parameter file\n print 'Reading parameter file'\n ar_cell_label, ar_coorx, ar_coory, ar_lambda, ar_Xc, ar_dam, ar_tan_beta, \\\n ar_tan_beta_channel, ar_L, ar_Ks, ar_theta_r, ar_theta_s, ar_n_o, ar_n_c, \\\n ar_cell_down, ar_pVs_t0, ar_Vo_t0, ar_Qc_t0, ar_kc \\\n = pm.read_cell_parameters(file_in)\n\n #Search for the cell close to the coordinates\n print 'Search for the outlet cell'\n cell_outlet = find_cell_coordinates(ar_cell_label, Xoutlet,\n Youtlet, ar_coorx, ar_coory, ar_lambda)\n\n #Search for the catchment cells\n print 'Search for the catchment cells'\n subcatch_label = all_up_cell(cell_outlet, ar_cell_down, ar_cell_label)\n\n #Select the subcatchmnent parameters\n print 'Select the subcatchmnent parameters'\n tab_param = np.zeros((len(subcatch_label),nb_param))\n new_label = np.arange(len(subcatch_label))\n\n tab_param[:,0] = new_label#ar_cell_label[subcatch_label]\n tab_param[:,1] = ar_coorx[subcatch_label]\n tab_param[:,2] = ar_coory[subcatch_label]\n tab_param[:,3] = ar_lambda[subcatch_label]\n tab_param[:,4] = ar_Xc[subcatch_label]\n tab_param[:,5] = ar_dam[subcatch_label]\n tab_param[:,6] = ar_tan_beta[subcatch_label]\n tab_param[:,7] = ar_tan_beta_channel[subcatch_label]\n tab_param[:,8] = ar_L[subcatch_label]\n tab_param[:,9] = ar_Ks[subcatch_label]\n tab_param[:,10] = ar_theta_r[subcatch_label]\n tab_param[:,11] = ar_theta_s[subcatch_label]\n tab_param[:,12] = ar_n_o[subcatch_label]\n tab_param[:,13] = ar_n_c[subcatch_label]\n for i in range(len(subcatch_label)):\n if i == 0:\n tab_param[i,14] = -9999.0\n else:\n ind = np.where(ar_cell_label[subcatch_label]\n == ar_cell_down[subcatch_label][i])\n\n tab_param[i,14] = new_label[ind]\n\n tab_param[:,15]=ar_pVs_t0[subcatch_label]\n tab_param[:,16]=ar_Vo_t0[subcatch_label]\n tab_param[:,17]=ar_Qc_t0[subcatch_label]\n tab_param[:,18]=ar_kc[subcatch_label]\n\n #~~~~~~Write parameter file~~~~~~#\n np.savetxt(file_out, tab_param)\n\n ar_image=ar_cell_label*0.\n ar_image[subcatch_label]=1.\n ar_image[ar_lambda==1.]=10.\n ar_image[cell_outlet]=5.\n field_map(ar_image, ar_coorx, ar_coory, X, picture_out, 'Subcatchment')", "def Read_RMCA_basic(Complete_Path):\n fid = open(Complete_Path,'r')\n S = []\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n S.append(float(line))\n #R.append(float(line[27:-2]))\n return np.array(S)", "def readParams(file_name):\n try:\n info = np.load(file_name,allow_pickle=True)[()]\n except FileNotFoundError:\n if file_name.split('/')[-2] == 'checkpoint':\n lfc_id_dir = '/expres/extracted/lfc_cal/lfc_id/'\n file_name = lfc_id_dir + os.path.basename(file_name)\n info = np.load(file_name,allow_pickle=True)[()]\n else:\n raise FileNotFoundError\n # Assemble information into \"fit-able\" form\n num_orders = len(info['params'])\n lines = [p[:,1] for p in info['params'] if p is not None]\n errs = [np.sqrt(cov[:,1,1]) for cov in info['cov'] if cov is not None]\n ordrs = [o for o in np.arange(86) if info['params'][o] is not None]\n waves = [w for w in info['wvln'] if w is not None]\n # I believe, but am not sure, that the 
wavelengths are multiplied by order\n # to separate them from when orders overlap at the edges\n waves = [wvln for order, wvln in zip(ordrs,waves)]\n ordrs = [np.ones_like(x) * m for m,x in zip(ordrs, lines)]\n\n x = np.concatenate(lines)\n y = np.concatenate(ordrs)\n e = np.concatenate(errs)\n w = np.concatenate(waves)\n # Note: default of pipeline includes ThAr lines, which we're not including here\n \n return (x,y,w,e)", "def read_data(filename):\n # Store debug mode\n debug = params.debug\n params.debug = None\n\n # Initialize dictionary\n header_dict = {}\n\n headername = filename + \".hdr\"\n\n with open(headername, \"r\") as f:\n # Replace characters for easier parsing\n hdata = f.read()\n hdata = hdata.replace(\",\\n\", \",\")\n hdata = hdata.replace(\"\\n,\", \",\")\n hdata = hdata.replace(\"{\\n\", \"{\")\n hdata = hdata.replace(\"\\n}\", \"}\")\n hdata = hdata.replace(\" \\n \", \"\")\n hdata = hdata.replace(\";\", \"\")\n hdata = hdata.split(\"\\n\")\n\n # Loop through and create a dictionary from the header file\n for i, string in enumerate(hdata):\n if ' = ' in string:\n header_data = string.split(\" = \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n elif ' : ' in string:\n header_data = string.split(\" : \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n\n # Reformat wavelengths\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"{\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"}\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\" \", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].split(\",\")\n\n # Create dictionary of wavelengths\n wavelength_dict = {}\n for j, wavelength in enumerate(header_dict[\"wavelength\"]):\n wavelength_dict.update({float(wavelength): float(j)})\n\n # Replace datatype ID number with the numpy datatype\n dtype_dict = {\"1\": np.uint8, \"2\": np.int16, \"3\": np.int32, \"4\": np.float32, \"5\": np.float64, \"6\": np.complex64,\n \"9\": np.complex128, \"12\": np.uint16, \"13\": np.uint32, \"14\": np.uint64, \"15\": np.uint64}\n header_dict[\"data type\"] = dtype_dict[header_dict[\"data type\"]]\n\n # Read in the data from the file\n raw_data = np.fromfile(filename, header_dict[\"data type\"], -1)\n\n # Reshape the raw data into a datacube array\n array_data = raw_data.reshape(int(header_dict[\"lines\"]),\n int(header_dict[\"bands\"]),\n int(header_dict[\"samples\"])).transpose((0, 2, 1))\n\n if \"default bands\" in header_dict:\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"{\", \"\")\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"}\", \"\")\n default_bands = header_dict[\"default bands\"].split(\",\")\n\n pseudo_rgb = cv2.merge((array_data[:, :, int(default_bands[0])],\n array_data[:, :, int(default_bands[1])],\n array_data[:, :, int(default_bands[2])]))\n\n else:\n max_wavelength = max([float(i) for i in wavelength_dict.keys()])\n min_wavelength = min([float(i) for i in wavelength_dict.keys()])\n # Check range of available wavelength\n if max_wavelength >= 635 and min_wavelength <= 490:\n id_red = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 710)\n id_green = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 540)\n id_blue = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 480)\n\n pseudo_rgb = cv2.merge((array_data[:, :, [id_blue]],\n array_data[:, :, [id_green]],\n array_data[:, 
:, [id_red]]))\n else:\n # Otherwise take 3 wavelengths, first, middle and last available wavelength\n id_red = int(header_dict[\"bands\"]) - 1\n id_green = int(id_red / 2)\n pseudo_rgb = cv2.merge((array_data[:, :, [0]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n\n # Gamma correct pseudo_rgb image\n pseudo_rgb = pseudo_rgb ** (1 / 2.2)\n # Scale each of the channels up to 255\n pseudo_rgb = cv2.merge((rescale(pseudo_rgb[:, :, 0]),\n rescale(pseudo_rgb[:, :, 1]),\n rescale(pseudo_rgb[:, :, 2])))\n\n max_wl = float(str(header_dict[\"wavelength\"][-1]).rstrip())\n min_wl = float(str(header_dict[\"wavelength\"][0]).rstrip())\n\n # Create an instance of the spectral_data class\n spectral_array = Spectral_data(array_data=array_data, max_wavelength=max_wl,\n min_wavelength=min_wl, d_type=header_dict[\"data type\"],\n wavelength_dict=wavelength_dict, samples=int(header_dict[\"samples\"]),\n lines=int(header_dict[\"lines\"]), interleave=header_dict[\"interleave\"],\n wavelength_units=header_dict[\"wavelength units\"], array_type=\"datacube\",\n pseudo_rgb=pseudo_rgb, filename=filename)\n\n # Reset debug mode\n params.debug = debug\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(pseudo_rgb)\n elif params.debug == \"print\":\n print_image(pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_pseudo_rgb.png\"))\n\n return spectral_array", "def read(self):\n # open the .SPE file\n with open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set", "def Read_RMCA_out(Complete_Path):\n fid = 
open(Complete_Path,'r')\n L,R = [],[]\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n L.append(float(line[:25]))\n R.append(float(line[27:-2]))\n return np.array(L),np.array(R)", "def read_from_file(self,grd_fn):\n self.grd_fn = grd_fn\n self.fp = open(self.grd_fn,'rt')\n hdr = self.fp.readline().strip() #header &GRD_2008 or &LISTGRD\n\n if hdr == self.hdr_08:\n print( \"Will read 2008 format for grid\" )\n n_parms = 11\n elif hdr == self.hdr_old:\n print( \"Will read old UnTRIM grid format\" )\n n_parms = 10\n\n for i in range(n_parms): # ignore TNE and TNS in new format files\n l = self.fp.readline()\n lhs,rhs = l.split('=')\n val = rhs.strip().strip(',')\n varname = lhs.strip()\n print( \"%s=%s\"%(varname,val) )\n\n if varname=='NV':\n Nvertices = int(val)\n elif varname=='NE':\n Npolys = int(val)\n elif varname=='NS':\n Nsides = int(val)\n elif varname=='NBC':\n Nboundary_poly = int(val)\n elif varname=='NSI':\n Ninternal_sides = int(val)\n elif varname=='NSF':\n Nflow_sides = int(val)\n elif varname=='NBC':\n Nbc = int(val)\n elif varname=='ANGLE':\n self.angle = float(val)\n elif varname=='LOCATION':\n self.location = val\n elif varname=='NR': ## these are read, but not used\n Nred = int(val)\n elif varname=='TNE':\n TNE=int(val)\n elif varname=='TNS':\n TNS=int(val)\n # others: HLAND for older fmt.\n \n while 1:\n s = self.fp.readline().strip() # header: /\n if s == '/':\n break\n\n # We know the size of everything, and can ask UnstructuredGrid to allocate\n # arrays now, with the 'special' meaning that passing an integer means allocate\n # the array of that size, full of zeros.\n # this allocates\n # self.nodes, self.edges, self.cells\n self.from_simple_data(points = Nvertices,edges = Nsides, cells = Npolys)\n\n for v in range(Nvertices):\n Cv = self.fp.readline().split()\n if hdr == self.hdr_08:\n vertex_num = int(Cv.pop(0))\n if vertex_num != v+1:\n print( \"Mismatched vertex numbering: %d != %d\"%(vertex_num,v+1) )\n self.nodes['x'][v,0] = float(Cv[0])\n self.nodes['x'][v,1] = float(Cv[1])\n \n print( \"Npolys\",Npolys )\n self.cells['edges'] = self.UNKNOWN # initialize all\n self.cells['nodes'] = self.UNKNOWN\n \n for c in range(Npolys):\n l = self.fp.readline()\n Cp = l.split()\n if hdr == self.hdr_08:\n poly_num = int(Cp.pop(0))\n if poly_num-1 != c:\n print( \"Mismatched polygon id: %fd != %d\"%(poly_num,c+1) )\n \n numsides = int(Cp[0])\n\n self.cells['_center'][c,0] = float(Cp[1])\n self.cells['_center'][c,1] = float(Cp[2])\n\n if hdr == self.hdr_old:\n # vertex index is Cp[3,5,7,9]\n # the others, 4,6,8,10, are edges, right?\n # convert to 0 based indices here\n\n # This is probably wrong! 
I think it's actually reading the\n # sides\n self.cells['edges'][c,0] = int(Cp[4]) - 1\n self.cells['edges'][c,1] = int(Cp[6]) - 1 \n self.cells['edges'][c,2] = int(Cp[8]) - 1\n if numsides == 4:\n self.cells['edges'][c,3] = int(Cp[10]) - 1 \n else:\n self.cells['edges'][c,3]=self.UNDEFINED\n #HERE - need to copy that to self.cells['nodes']\n else:\n for ei in range(numsides):\n self.cells['nodes'][c,ei] = int(Cp[3+ei]) - 1\n self.cells['edges'][c,ei] = int(Cp[3+numsides+ei]) - 1\n self.cells['nodes'][c,numsides:]=self.UNDEFINED\n self.cells['edges'][c,numsides:]=self.UNDEFINED\n \n # choose some large, above-sea-level depth\n self.cells['depth_mean'] = -1000 # not sure this is doing anything...\n\n for e in range(Nsides):\n Cs = self.fp.readline().split()\n if hdr == self.hdr_08:\n # side num = int(Cs.pop(0))\n Cs.pop(0)\n elif hdr == self.hdr_old:\n # side depth?\n edge_depth = self.edges['depth_mean'][e] = float(Cs.pop(0))\n \n self.edges['nodes'][e,0] = int(Cs[0])-1 # vertex indices\n self.edges['nodes'][e,1] = int(Cs[1])-1\n \n self.edges['cells'][e,0] = int(Cs[2])-1 # cell neighbors\n self.edges['cells'][e,1] = int(Cs[3])-1\n\n if hdr == self.hdr_old:\n for nc in self.edges['cells'][e]:\n if nc >= 0 and edge_depth > self.cells['depth_mean'][nc]:\n self.cells['depth_mean'][nc] = edge_depth\n\n if hdr==self.hdr_old:\n # old format - have to infer cell nodes from edges\n self.make_cell_nodes_from_edge_nodes()\n\n # Try to make sense of the marks and red/black:\n self.cells['red'][:Nred] = True\n self.cells['mark'][:Nboundary_poly] = self.BOUNDARY\n self.edges['mark'][:Ninternal_sides] = 0\n self.edges['mark'][Ninternal_sides:Nflow_sides] = self.FLOW\n self.edges['mark'][Nflow_sides:] = self.LAND\n\n # Bathymetry:\n if hdr == self.hdr_08:\n # make a cheap tokenizer to read floats across lines\n # note that it's up to the user to know that all values from\n # the line are read, and not to get the iterator until you're\n # ready for some values to be read\n def tokenizer():\n while True:\n for item in self.fp.readline().split():\n yield item\n for c in range(Npolys):\n check_c,nis = [int(s) for s in self.fp.readline().split()]\n if check_c != c+1:\n print(\"ERROR: while reading cell subgrid, cell index mismatch: %s vs. %d\"%(c+1,check_c))\n \n next_token = tokenizer().next\n areas = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n \n self.cells['depth_mean'][c] = np.sum(areas*depths) / np.sum(areas)\n self.cells['_area'][c] = np.sum(areas)\n self.cells['depth_max'][c] = depths.max()\n self.cells['subgrid'][c] = (areas,depths)\n for e in range(Nflow_sides):\n l = self.fp.readline()\n # print \"%d/%d - Read line: %s\"%(e,self.Nsides,l)\n check_e,nis = [int(s) for s in l.split()]\n if check_e != e+1:\n print( \"ERROR: While reading edge subgrid, edge index mismatch: %s vs. 
%s\"%(e+1,check_e) )\n next_token = tokenizer().next\n lengths = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n if sum(lengths)<=0:\n print( \"edge %d has bad lengths\"%e )\n self.edges['depth_mean'][e] = np.sum(lengths*depths) / sum(lengths)\n self.edges['depth_max'][e] = depths.max()\n self.edges['subgrid'][e] = (lengths,depths)\n # and land boundaries get zeros.\n for e in range(Nflow_sides,Nsides):\n self.edges['depth_mean'][e] = 0.0\n self.edges['depth_max'][e] = 0.0\n self.edges['subgrid'][e] = ([],[])", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). 
\n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def parseSpineXout(ofname):\n# 0 1 2 3 4 5 6 7 8 9 10 11 12\n# # index AA SS phi1 psi1 P_E P_C P_H phi0 psi0 ASA S_pk S_SS pk_phi pk_psi pkc_phi pkc_ps\n# 1 E C -85.6 141.3 0.0527 0.8784 0.0689 -87.5 143.0 130.5 0.6941 0.4126 -5.0000 5.0000 0.9924 0.2499\n ss=[]\n phi=[]\n psi=[]\n asa=[]\n rasa=[]\n MAX_ACC=getMAXASA('single')\n for f in open(ofname,'r'):\n f=f.split()\n if f[0]=='#':\n continue\n #ss.append(f[2])\n phi.append([float(f[8]),float(f[3])])\n psi.append([float(f[9]),float(f[4])])\n ss.append([float(i) for i in f[5:8]])\n asa.append(float(f[10]))\n try:\n m=MAX_ACC[f[1]] #if key not found then produce nan\n except KeyError as e:\n print e\n m=np.nan\n continue\n rasa.append(float(f[10])/m)\n return (np.array(asa),np.array(rasa),np.array(ss),np.array(phi),np.array(psi))", "def readDriverFile(self, input_file):\n\n\n fid = open(self.basePath + input_file,'r')\n\n # Line 1\n line = fid.readline()\n l_input = line.split('!')\n mshfile = l_input[0].rstrip()\n\n # Line 2\n line = fid.readline()\n l_input = line.split('!')\n obsfile = l_input[0].rstrip()\n\n # Line 3\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='null':\n topofile = []\n\n else:\n topofile = l_input[0].rstrip()\n\n\n # Line 4\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mstart = float(l_input[1])\n\n else:\n mstart = l_input[0].rstrip()\n\n # Line 5\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mref = float(l_input[1])\n\n else:\n mref = l_input[0].rstrip()\n\n # Line 6\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n staticInput = float(l_input[1])\n\n elif l_input[0]=='DEFAULT':\n staticInput = None\n\n else:\n staticInput = l_input[0].rstrip()\n\n\n # Line 7\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n magfile = []\n\n else:\n magfile = l_input[0].rstrip()\n\n # Line 8\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n wgtfile = []\n\n else:\n wgtfile = l_input[0].rstrip()\n\n # Line 9\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n chi = float(l_input[0])\n\n # Line 10\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n val = np.array(l_input[0:4])\n alphas = val.astype(np.float)\n\n # Line 11\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n bounds = val.astype(np.float)\n\n else:\n bounds = l_input[0].rstrip()\n\n # Line 12\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:6])\n lpnorms = val.astype(np.float)\n\n else:\n lpnorms = l_input[0].rstrip()\n\n # Line 13\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n eps = val.astype(np.float)\n\n else:\n eps = [None,None]\n\n self.mshfile = mshfile\n self.obsfile = obsfile\n self.topofile = topofile\n self.mstart = mstart\n self._mrefInput = mref\n self._staticInput = staticInput\n self.magfile = magfile\n self.wgtfile = wgtfile\n self.chi = chi\n self.alphas = alphas\n self.bounds = bounds\n self.lpnorms = lpnorms\n self.eps = eps", "def 
read(self, run):\n # read the file\n self['run'] = run[0:run.rfind('.xml')]\n f = open(run)\n for line in f:\n \n if line.find('SDSU Exec') >= 0:\n n1 = line.index('name=') + 6\n n2 = line.index('\"', n1)\n self['application'] = line[n1:n2]\n\n elif line.find('<detector_status') >= 0:\n n1 = line.index('name=') + 6\n n2 = line.index('\"', n1)\n if line[n1:n2] != 'Ultraspec':\n raise Exception, 'Run ' + run + ' is not an Ultraspec file.'\n \n elif line.find('SPEED') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['speed'] = line[n1:n2]\n \n elif line.find('X_BIN') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x_bin'] = line[n1:n2]\n \n elif line.find('Y_BIN') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y_bin'] = line[n1:n2]\n \n # first window \n \n elif line.find('X1_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x1_start'] = line[n1:n2]\n \n elif line.find('X1_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x1_size'] = line[n1:n2]\n \n elif line.find('Y1_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y1_start'] = line[n1:n2]\n \n elif line.find('Y1_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y1_size'] = line[n1:n2]\n \n # second window\n \n elif line.find('X2_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x2_start'] = line[n1:n2]\n \n elif line.find('X2_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x2_size'] = line[n1:n2]\n \n elif line.find('Y2_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y2_start'] = line[n1:n2]\n \n elif line.find('Y2_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y2_size'] = line[n1:n2]\n \n elif line.find('<target>') >= 0:\n n1 = line.index('target') + 7\n n2 = line.index('<', n1)\n self['target'] = line[n1:n2]\n\n elif line.find('<grating>') >= 0:\n n1 = line.index('grating') + 8\n n2 = line.index('<', n1)\n self['grating'] = line[n1:n2]\n\n elif line.find('<slit_width>') >= 0:\n n1 = line.index('slit_width') + 11\n n2 = line.index('<', n1)\n self['slit_width'] = line[n1:n2]\n\n elif line.find('<slit_angle>') >= 0:\n n1 = line.index('slit_angle') + 11\n n2 = line.index('<', n1)\n self['slit_angle'] = line[n1:n2]\n \n elif line.find('<filters>') >= 0:\n n1 = line.index('filters') + 8\n n2 = line.index('<', n1)\n self['filters'] = line[n1:n2]\n\n elif line.find('<ID>') >= 0:\n n1 = line.index('ID') + 3\n n2 = line.index('<', n1)\n self['ID'] = line[n1:n2]\n\n elif line.find('<PI>') >= 0:\n n1 = line.index('PI') + 3\n n2 = line.index('<', n1)\n self['PI'] = line[n1:n2]\n\n elif line.find('<comment>') >= 0:\n n1 = line.index('comment') + 8\n n2 = line.index('<', n1)\n self['comment'] = line[n1:n2]\n \n\n # check that we have found what we expected to find\n if 'application' not in self:\n raise Exception, 'Failed to find application name in ' + run\n\n if self.is_not_power_onoff():\n\n if 'x_bin' not in self:\n raise Exception, 'Failed to find X_BIN in ' + run\n\n if 'y_bin' not in self:\n raise Exception, 'Failed to find Y_BIN in ' + run\n\n if 'x1_start' not in self:\n raise Exception, 'Failed to find X2_START in ' + run\n \n if 'x1_size' not in self:\n raise Exception, 'Failed to find X2_SIZE in ' + run\n \n if 'y1_start' not in self:\n raise Exception, 'Failed to find Y2_START in ' + run\n \n if 'y1_size' not 
in self:\n raise Exception, 'Failed to find Y2_SIZE in ' + run\n \n if 'x2_start' not in self:\n raise Exception, 'Failed to find X2_START in ' + run\n \n if 'x2_size' not in self:\n raise Exception, 'Failed to find X2_SIZE in ' + run\n \n if 'y2_start' not in self:\n raise Exception, 'Failed to find Y2_START in ' + run\n \n if 'y2_size' not in self:\n raise Exception, 'Failed to find Y2_SIZE in ' + run\n \n if 'target' not in self:\n self['target'] = 'UNKNOWN'\n\n if 'filters' not in self:\n self['filters'] = '---'\n\n if 'grating' not in self:\n self['grating'] = '---'\n\n if 'slit_width' not in self:\n self['slit_width'] = '---'\n\n if 'slit_angle' not in self:\n self['slit_angle'] = '---'\n\n if 'ID' not in self:\n self['ID'] = 'UNKNOWN'\n\n if 'PI' not in self:\n self['PI'] = 'UNKNOWN'", "def read(self, timestamp=None):\n grbs = pygrib.open(self.filename)\n\n grid = self.subgrid\n\n return_img = {}\n return_metadata = {}\n\n var_msg_lut = {p: None for p in self.parameter}\n sea_mask = None\n for N in range(grbs.messages):\n n = N + 1\n message = grbs.message(n)\n param_name = str(message.cfVarNameECMF)\n\n if param_name == \"lsm\":\n if self.mask_seapoints and sea_mask is None:\n sea_mask = message.values.flatten()\n\n if param_name not in self.parameter:\n continue\n else:\n var_msg_lut[param_name] = n\n\n # available variables\n shape = None\n for param_name, n in var_msg_lut.items():\n if n is None:\n continue\n\n return_metadata[param_name] = {}\n\n message = grbs.message(n)\n\n param_data = message.values.flatten()\n if not shape:\n shape = param_data.shape\n return_img[param_name] = param_data\n\n if grid is None:\n lats, lons = message.latlons()\n try:\n res_lat, res_lon = get_grid_resolution(lats, lons)\n grid = ERA_RegularImgGrid(res_lat, res_lon)\n except ValueError: # when grid not regular\n lons_gt_180 = np.where(lons > 180.0)\n lons[lons_gt_180] = lons[lons_gt_180] - 360\n grid = ERA_IrregularImgGrid(lons, lats)\n\n return_metadata[param_name][\"units\"] = message[\"units\"]\n return_metadata[param_name][\"long_name\"] = \\\n message[\"parameterName\"]\n\n if \"levels\" in message.keys():\n return_metadata[param_name][\"depth\"] = \"{:} cm\".format(\n message[\"levels\"])\n\n if self.mask_seapoints:\n if sea_mask is None:\n raise IOError(\n \"No land sea mask parameter (lsm) in passed image\"\n \" for masking.\")\n else:\n # mask the loaded data\n for name in return_img.keys():\n param_data = return_img[name]\n param_data = np.ma.array(\n param_data,\n mask=np.logical_not(sea_mask),\n fill_value=np.nan,\n )\n param_data = param_data.filled()\n return_img[name] = param_data\n\n grbs.close()\n\n # missing variables\n for param_name, n in var_msg_lut.items():\n if n is not None:\n continue\n param_data = np.full(shape, np.nan)\n warnings.warn(\"Cannot load variable {var} from file {thefile}. 
\"\n \"Filling image with NaNs.\".format(\n var=param_name, thefile=self.filename))\n return_img[param_name] = param_data\n return_metadata[param_name] = {}\n return_metadata[param_name][\"long_name\"] = lookup(\n self.product, [param_name]).iloc[0][\"long_name\"]\n\n if self.array_1D:\n return Image(\n grid.activearrlon,\n grid.activearrlat,\n return_img,\n return_metadata,\n timestamp,\n )\n else:\n nlat = np.unique(grid.activearrlat).size\n nlon = np.unique(grid.activearrlon).size\n\n for key in return_img:\n return_img[key] = return_img[key].reshape((nlat, nlon))\n\n return Image(\n grid.activearrlon.reshape(nlat, nlon),\n grid.activearrlat.reshape(nlat, nlon),\n return_img,\n return_metadata,\n timestamp,\n )", "def __load_data(self) -> np.array:\n with open('.'+os.sep+'ressources'+os.sep+self.path+os.sep+'capacity.txt','r') as fp:\n capacity = self.__parse_line(fp.readline())\n with open('.' + os.sep + 'ressources' + os.sep + self.path + os.sep + 'demand.txt', 'r') as fp:\n demand = self.__parse_line(fp.readline())\n with open('.' + os.sep + 'ressources' + os.sep + self.path + os.sep + 'distance.txt', 'r') as fp:\n distance_matrix = []\n for line in fp:\n row = self.__parse_line(line)\n distance_matrix.append(row)\n with open('.' + os.sep + 'ressources' + os.sep + self.path + os.sep + 'transportation_cost.txt', 'r') as fp:\n transportation_cost = self.__parse_line(fp.readline())\n return np.asarray(capacity), np.asarray(demand), np.asarray(distance_matrix), np.asarray(transportation_cost)", "def structure(self, ism_input):\n f = open(ism_input, 'r')\n data = []\n for line in f:\n line = line.replace('\\\"', '')\n line = line.replace('],[', '];[')\n line = line.strip()\n line = line.replace(']', '')\n line = line.replace('[', '')\n line = line.split(';')\n line[0] = line[0].split('|')\n ls = list(map(lambda x: x.split(','), line[1:]))\n ls = list(map(lambda x: list(map(lambda y: y.split('|'), x)), ls))\n line[1:] = ls\n data.append(line)\n data = np.array(data[1:]) \n \n return data", "def readCrystParam(crystfile):\n \n # Default values\n ccell1 = np.eye(3)\n ccell2 = np.eye(3)\n planehkl = [1,0,0]\n diruvw = [0,1,0]\n \n try:\n with open(crystfile,\"r\") as f:\n content = f.readlines()\n except FileNotFoundError:\n content = []\n\n for l in content:\n if l[0].rstrip() == \"#\":\n continue\n line = l.split('=')\n if len(line) == 2:\n if line[0].rstrip()==\"ccell1\":\n ccell1 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"ccell2\":\n ccell2 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"planehkl\":\n planehkl = eval(line[1].rstrip())\n elif line[0].rstrip()==\"diruvw\":\n diruvw = eval(line[1].rstrip())\n else:\n print(\"WARNING: %s is not a supported input\"%(line[0].rstrip()))\n elif len(line) > 2:\n raise SyntaxError(l)\n\n return ccell1, ccell2, planehkl, diruvw", "def rdspecdat(self):\n # TODO : ugh. this is crude. 
Should have some checks for file format\n # and probably better to use the astropy.io functions now.\n try:\n w, f, e = np.loadtxt(self.filename, unpack=True)\n except:\n w, f = np.loadtxt(self.filename, unpack=True)\n e = []", "def open_gains(fname, snver=1):\n\n hdu = get_hdu(fname, extname='AIPS SN', ver=snver)\n\n nif = hdu.header['NO_IF']\n npol = hdu.header['NO_POL']\n nant = hdu.header['NO_ANT']\n # set ``nif'' from dtype of hdu.data\n _data = np.zeros(hdu.header['NAXIS2'], dtype=[('start', '<f8'),\n ('stop', '<f8'),\n ('antenna', 'int'),\n ('gains', 'complex',\n (nif, npol,)),\n ('weights', '<f8',\n (nif, npol,))])\n\n time = hdu.data['TIME']\n dtime = hdu.data['TIME INTERVAL']\n antenna = hdu.data['ANTENNA NO.']\n\n # Constructing `gains` field\n rgains = hdu.data['REAL1'] + 1j * hdu.data['IMAG1']\n # => (466, 8)\n lgains = hdu.data['REAL2'] + 1j * hdu.data['IMAG2']\n rgains = np.expand_dims(rgains, axis=2)\n # => (466, 8, 1)\n lgains = np.expand_dims(lgains, axis=2)\n gains = np.dstack((rgains, lgains))\n # => (466, 8, 2)\n\n # Constructing `weights` field\n rweights = hdu.data['WEIGHT 1']\n # => (466, 8)\n lweights = hdu.data['WEIGHT 2']\n rweights = np.expand_dims(rweights, axis=2)\n # => (466, 8, 1)\n lweights = np.expand_dims(lweights, axis=2)\n weights = np.dstack((rweights, lweights))\n # => (466, 8, 2)\n\n # Filling structured array by fields\n _data['start'] = time - 0.5 * dtime\n _data['stop'] = time + 0.5 * dtime\n _data['antenna'] = antenna\n _data['gains'] = gains\n _data['weights'] = weights\n\n gains = list()\n for ant in set(_data['antenna']):\n idx = _data['antenna'] == ant\n gains.append(GainCurve(ant, nif, npol, _data[idx][['start', 'stop',\n 'gains',\n 'weights']]))\n return gains", "def readFile(file_name):\n if file_name.split('.')[-1] == 'thid':\n x,m,w = readThid(file_name)\n e = np.empty_like(x)\n e[:] = np.nan\n return x,m,w,e\n else:\n return readParams(file_name)", "def ReadBasicInfo():\r\n\r\n EquilibriumStep, ProductionStep,HEPCP,HEPCE,Multiple=10000000,10000000,100,100,2\r\n InputPath,OutputPath,AtomParameterPath,TaskSuffix,MaterialInputFormat='..','..','..','','mol'\r\n GasType,GasAtomTypeNum,GasAtomType,GasPartialPressure,TemperatureList,PressureList,\\\r\n TorqueSetting,MuSiCSetting,Nodes=[],[],[],[],[],[],[],[],['1:ppn=1']\r\n CutOff,GridSpacingP,GridSpacingE=12.8,2.0,2.0\r\n MakeGCMC,UsePmap,UseEmap,UsePost,MakePmap,MakeEmap,MakeTorque,KeyOne,KeyTwo,\\\r\n PDBCharges = False,False,False,False,False,False,False,False,False,False\r\n\r\n with open('GlueParameters', 'r') as File:\r\n for Line in File.readlines():\r\n if Line.strip():\r\n WordList = Line.strip().split()\r\n if len(WordList)>1 or KeyOne==True or KeyTwo==True:\r\n if WordList[0]=='#':\r\n continue\r\n\r\n # Controlled part\r\n elif WordList[0] == 'MakeGCMC:' and WordList[1] == 'open':\r\n MakeGCMC = True\r\n elif WordList[0] == 'UsePmap:' and WordList[1] == 'yes':\r\n UsePmap = True\r\n elif WordList[0] == 'UseEmap:' and WordList[1] == 'yes':\r\n UseEmap = True\r\n elif WordList[0] == 'UsePost:' and WordList[1] == 'yes':\r\n UsePost = True\r\n elif WordList[0] == 'MakePmap:' and WordList[1] == 'open':\r\n MakePmap = True\r\n elif WordList[0] == 'MakeEmap:' and WordList[1] == 'open':\r\n MakeEmap = True\r\n elif WordList[0] == 'MakeTorque:' and WordList[1] == 'open':\r\n MakeTorque = True\r\n elif WordList[0] == 'UseChargesFromPDBFile:' and WordList[1] == 'yes':\r\n PDBCharges = True\r\n\r\n # Basic part\r\n elif WordList[0]=='InputPath:':\r\n InputPath=WordList[1]\r\n elif 
WordList[0]=='MaterialInputFormat:':\r\n MaterialInputFormat=WordList[1]\r\n elif WordList[0]=='OutputPath:':\r\n OutputPath=WordList[1]\r\n elif WordList[0]=='AtomParameterPath:':\r\n AtomParameterPath=WordList[1]\r\n elif WordList[0] == 'GasType:':\r\n GasType = list(WordList[1:])\r\n elif WordList[0] == 'GasAtomTypeNum:':\r\n\r\n for i in WordList[1:]:\r\n GasAtomTypeNum.append(int(i))\r\n\r\n elif WordList[0] == 'GasAtomType:':\r\n GasAtomType = list(WordList[1:])\r\n elif WordList[0] == 'Multiple:':\r\n Multiple = int(WordList[1])\r\n elif WordList[0] == 'CutOff:':\r\n CutOff = float(WordList[1])\r\n\r\n # GCMC part\r\n\r\n elif WordList[0] == 'GasPartialPressure:':\r\n\r\n for j in WordList[1:]:\r\n GasPartialPressure.append(str(j))\r\n\r\n elif WordList[0] == 'TemperatureList(K):':\r\n\r\n for l in WordList[1:]:\r\n TemperatureList.append(float(l))\r\n\r\n elif WordList[0] == 'PressureList(kPa):':\r\n\r\n for k in WordList[1:]:\r\n PressureList.append(float(k))\r\n\r\n elif WordList[0] == 'EquilibriumStep:':\r\n EquilibriumStep = int(WordList[1])\r\n elif WordList[0] == 'ProductionStep:':\r\n ProductionStep = int(WordList[1])\r\n\r\n # Pmap part\r\n elif WordList[0] == 'GridSpacingP(Ang):':\r\n GridSpacingP = float(WordList[1])\r\n elif WordList[0] == 'HighEndPotentialCutoffP(kJ/mol):':\r\n HEPCP = int(WordList[1])\r\n\r\n # Emap part\r\n elif WordList[0] == 'GridSpacingE(Ang):':\r\n GridSpacingE = float(WordList[1])\r\n elif WordList[0] == 'HighEndPotentialCutoffE(kJ/mol):':\r\n HEPCE = int(WordList[1])\r\n\r\n # Torque part\r\n elif WordList[0] == 'Nodes:':\r\n Nodes = WordList[1:]\r\n elif WordList[0] == 'TaskSuffix:':\r\n TaskSuffix = WordList[1]\r\n elif WordList[0] == 'TorqueSetting:':\r\n KeyOne = True\r\n elif WordList[0] == 'MuSiCSetting:':\r\n KeyOne = False\r\n KeyTwo = True\r\n elif WordList[0] == 'END':\r\n KeyTwo = False\r\n elif KeyOne == True:\r\n TorqueSetting.append(Line)\r\n elif KeyTwo == True:\r\n MuSiCSetting.append(Line)\r\n\r\n return (InputPath,OutputPath,AtomParameterPath,MakeTorque,GasType,\r\n GasAtomTypeNum,GasAtomType,GasPartialPressure,TemperatureList,PressureList,CutOff,MakeGCMC,UsePmap,\r\n UseEmap,UsePost,MakePmap,MakeEmap,EquilibriumStep,ProductionStep,GridSpacingP,HEPCP,GridSpacingE,HEPCE,\r\n Multiple,TorqueSetting,MuSiCSetting,Nodes,TaskSuffix,PDBCharges,MaterialInputFormat)", "def getSubParamLine(self,subname, numNodesSub, subParamInfo,dir_name):\n #nodeSubInterface = []\n subOptionInfo_p = []\n subSchemInfo_p = []\n filename_t = subname + '.sub'\n filename_t = os.path.join(dir_name, filename_t)\n data_p = self.readNetlist(filename_t)\n subOptionInfo_p, subSchemInfo_p = self.separateNetlistInfo(data_p)\n \n if len(subOptionInfo_p) > 0:\n newline = subOptionInfo_p[0]\n newline = newline.split('.subckt '+ subname) \n intLine = newline[1].split()\n print \"numNodesSub Index---------->\",numNodesSub\n newindex = numNodesSub[subname]\n appen_line = intLine[newindex:len(intLine)]\n appen_param = ','.join(appen_line)\n paramLine = 'parameter Real ' + appen_param + ';'\n paramLine = paramLine.translate(maketrans('{}', ' '))\n subParamInfo.append(paramLine)\n return subParamInfo", "def readBeaches(filein):\n\n rdarray = np.genfromtxt(filein, skip_header=1, delimiter=',', usecols=[5,6,8,9,10,11,12])\n beaches = np.genfromtxt(filein, skip_header=1, delimiter=',', usecols=[4], dtype=str)\n sspaid = np.genfromtxt(filein, skip_header=1, delimiter=',', usecols=[0], dtype=str)\n\n beachlats = rdarray[:,0]\n beachlons = rdarray[:,1]\n beachnorm = rdarray[:,2]\n 
maxang = rdarray[:,3]\n beachslope = rdarray[:,4]\n approslope = rdarray[:,5]\n beachtype = np.array(rdarray[:,6], dtype=int)\n\n return sspaid, beaches, beachlats, beachlons, beachnorm, maxang, approslope, beachslope, beachtype", "def spinex_sec(infile, sequence):\n return np.loadtxt(infile, usecols=[7, 5, 6], skiprows=1).reshape((1, -1, 3))", "def legacy_load(self,filepath = '', amplifier = 'Amplifier'):\n if filepath == '':\n filepath = filedialog.askopenfilename()\n file1 = open(filepath)\n ctxt = file1.readline().rstrip()\n\n header = ''\n rowskip = 0\n while ctxt[0] == '#':\n header = header + ctxt[1:]\n ctxt = file1.readline().rstrip()\n rowskip += 1\n\n voltstr = header[2:-1]\n\n if voltstr.find(',') > 0:\n volts = np.fromstring(voltstr, sep=',')\n else:\n volts = np.fromstring(voltstr, sep='\\t')\n\n file1.close()\n data1 = np.loadtxt(filepath, skiprows=rowskip)\n self.hydoutput = data1\n self.cfreq = volts\n self.amplifier = amplifier", "def loadFromINIFile(self): \r\n if (self.verbose):\r\n print(\"Reading SA300.ini\")\r\n iniFile = open('SA300.ini','r')\r\n for i in range(8):\r\n aLine = iniFile.readline().rstrip(\"\\n\") # read line \r\n tokens = aLine.split()\r\n if (self.verbose):\r\n print(tokens)\r\n self.IDStrList[i].set(tokens[0])\r\n self.WeightList[i].set(tokens[1])\r\n self.schedList[i].set(self.sched[int(tokens[2])])\r\n self.paramNumList[i].set(tokens[3])\r\n self.SessionLengthList[i].set(tokens[4])\r\n self.IBILengthList[i].set(tokens[5])\r\n self.PumpTimeList[i].set(tokens[6])\r\n self.calcPumpTimeList[i].set(tokens[7])\r\n aString = iniFile.readline().rstrip(\"\\n\") # COM number (done differently on a Mac)\r\n self.portString.set(aString)\r\n # print(\"portString = \"+aString)\r\n aString = iniFile.readline().rstrip(\"\\n\") # read next line\r\n tokens = aString.split()\r\n self.varCode = int(tokens[0])\r\n # print(\"self.varCode =\",self.varCode,format(self.varCode,'08b')) \r\n for bit in range(8):\r\n mask = (2**bit) # mask (eg. 
00001000)\r\n # Uses AND and mask to determine whether to set bit\r\n if (self.varCode & mask > 0): self.sysVarList[bit].set(True)\r\n else: self.sysVarList[bit].set(False)\r\n iniFile.close()", "def read_PSSM_data(self):\n\n names = os.listdir(self.pssm_path)\n fname = [n for n in names if n.find(self.molname)==0]\n\n if len(fname)>1:\n raise ValueError('Multiple PSSM files found for %s in %s',self.mol_name,self.pssm_path)\n if len(fname)==0:\n raise FileNotFoundError('No PSSM file found for %s in %s',self.mol_name,self.pssm_path)\n else:\n fname = fname[0]\n\n f = open(self.pssm_path + '/' + fname,'rb')\n data = f.readlines()\n f.close()\n raw_data = list( map(lambda x: x.decode('utf-8').split(),data))\n\n self.res_data = np.array(raw_data)[:,:3]\n self.res_data = [ (r[0],int(r[1]),r[2]) for r in self.res_data ]\n self.pssm_data = np.array(raw_data)[:,3:].astype(np.float)", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def init_data_array(self, mess = None): \n if self.verbose > 1:\n print(\"MultiLinearSpectra.init_data_array()\") \n \n if mess is None:\n if self.mess is None:\n warnings.warn(\"MultiLinearSpectra.init_data_array(): no data to initialize\")\n return None\n else:\n self.mess = mess\n \n\n \n \n for m in range(len(self.mess)):\n \n self.mess[m][\"index\"] = m\n \n kwargs = {}\n for k, v in self.mess[m].items():\n kwargs[k] = v\n \n if 
self.mess[m][\"class\"] == \"PASGas\" and flag_ST:\n self.mess[m][\"object\"] = PASG.PASGas(verbose = self.verbose, **kwargs)\n\n elif self.mess[m][\"class\"] == \"PASLiquid\" and flag_ST:\n self.mess[m][\"object\"] = PASL.PASLiquid(verbose = self.verbose, **kwargs)\n\n\n # x_unit = self.mess[0].x_unit\n # y_unit = self.mess[0].y_unit\n\n # for m in range(1, len(self.mess)):\n # if x_unit != self.mess[m].x_unit:\n # self.mess.x_unit", "def parse_parameters(filePath):\r\n numThreads, queue, affinity = 0,\"\",\"\"\r\n \r\n for line in open(filePath):\r\n if \"spec.omp2001.size:\" in line:\r\n if get_last_column_number(line)==\"test\":\r\n print(\"IS TEST SIZE!!1 : \" + filePath)\r\n \r\n if \"spec.omp2001.sw_threads:\" in line:\r\n numThreads = int(get_last_column_number(line))\r\n \r\n if \"spec.omp2001.mach:\" in line:\r\n machine = line.split(\" \")[-1]\r\n columns = machine.split(\".\")\r\n \r\n queue = columns[0]\r\n affinity = columns[1]\r\n \r\n return numThreads, queue, affinity", "def read_params(fname):\n f = open(fname, 'r')\n par = {} #output\n for i in range(10): # esta dentro de las primeras 10 lineas\n l = f.readline().split()\n #print \" ---> \", l\n number = u'%s' % l[-1] # presumably a number\n if not number.replace('.','').replace('-','').isnumeric():\n if l[0]=='#####':\n break\n else:\n continue # we proceed ONLY IF this is numeric string\n #print ' FIRST: ', l[0]\n if l[0]=='#####':\n #print \"IM I HERE????\"\n break # end of header\n\n name = l[1][:-1] # l[0] es '#', y -1 para comernos el \":\"\n value = np.float(l[2]) # l[2] es el valor\n par[name] = value\n\n return par", "def read_file(self, fullname):\n\n data = np.genfromtxt(fullname, dtype=None, names=True, skip_header=0)\n return data", "def load_regain_values(filename):\n gain_lines = open(filename,\"r\").readlines()\n gain_lines = [l.split() for l in gain_lines if len(l)>0 and l[0]!='#'] #remove comments and blanks\n tubes,gain_vals = zip(*[(int(l[0]),float(l[1])) for l in gain_lines])\n return Array(gain_vals)", "def load_params(fname):\n parmsff = {}\n # FIXME: This might fail if a parameter name is larger than 50 characters.\n # FIXME: Maybe do this with the csv module instead?\n temparr = numpy.loadtxt(fname, dtype=([('a','S50'),('b','f8')]), delimiter=',') \n for i in temparr:\n parmsff[i[0]] = i[1]\n return parmsff", "def read_sp2(file_name, debug=False, arm_convention=True):\n\n my_data = open(file_name, \"rb\").read()\n # Get file date from name\n if platform.system() == \"Windows\":\n split_file_name = file_name.split(\"\\\\\")\n else:\n split_file_name = file_name.split(\"/\")\n if arm_convention:\n next_split = split_file_name[-1].split(\".\")\n dt = datetime.strptime(next_split[2], \"%Y%m%d\")\n else:\n dt = datetime.strptime(split_file_name[-1][0:8], \"%Y%m%d\")\n\n if len(my_data) > 0:\n bytepos = 0\n numCols = struct.unpack(\">I\", my_data[bytepos:bytepos + 4])[0]\n bytepos += 4\n numChannels = struct.unpack(\">I\", my_data[bytepos:bytepos + 4])[0]\n if debug:\n print((\"Loaded file with numCols = {}, numChannels = {}\"\n .format(numCols, numChannels)))\n\n data_points_per_record = numChannels * numCols\n\n bytes_per_record = 2 * data_points_per_record\n bytes_not_data_array = 12 + 2 + 28 + 16\n bytes_per_record += bytes_not_data_array\n last_pos = int(bytes_per_record - 1)\n num_spare_cols = struct.unpack(\">I\", my_data[last_pos - 4:last_pos])[0]\n if debug:\n print(\"Number of spare columns = %d\" % num_spare_cols)\n\n if num_spare_cols != 0:\n bytes_per_record += num_spare_cols\n\n 
numRecords = int(len(my_data) / bytes_per_record)\n totalRows = numChannels * numRecords\n DataWave = np.zeros((totalRows, numCols), dtype='int16')\n Flag = np.zeros(int(totalRows / numChannels), dtype='int16')\n TimeWave = np.zeros(numRecords, dtype='float64')\n Res1 = np.zeros(numRecords, dtype='float32')\n EventIndex = np.zeros(numRecords, dtype='float32')\n TimeDiv10000 = np.zeros(numRecords, dtype='float64')\n TimeRemainder = np.zeros(numRecords, dtype='float64')\n Res5 = np.zeros(numRecords, dtype='float32')\n Res6 = np.zeros(numRecords, dtype='float32')\n Res7 = np.zeros(numRecords, dtype='float64')\n Res8 = np.zeros(numRecords, dtype='float64')\n if num_spare_cols != 0:\n SpareDataArray = np.zeros(numRecords, num_spare_cols)\n\n arrayFmt = \">\"\n for i in range(data_points_per_record):\n arrayFmt += \"h\"\n\n for record in range(numRecords):\n dataStartPoint = record * bytes_per_record + 8\n startRow = record * numChannels\n endRow = startRow + numChannels - 1\n the_row = np.array(struct.unpack(\n arrayFmt, my_data[dataStartPoint:dataStartPoint + int(data_points_per_record * 2)]))\n\n DataWave[startRow:endRow + 1, 0:numCols] = the_row.reshape(\n numCols, numChannels).T\n dataStartPoint += data_points_per_record * 2\n Flag[record] = struct.unpack(\">h\", my_data[dataStartPoint:dataStartPoint + 2])[0]\n next_floats = struct.unpack(\">ffffffff\", my_data[dataStartPoint + 2:dataStartPoint + 34])\n TimeWave[record] = next_floats[0]\n Res1[record] = next_floats[1]\n EventIndex[record] = next_floats[2]\n TimeDiv10000[record] = next_floats[3]\n TimeRemainder[record] = next_floats[4]\n Res5[record] = next_floats[5]\n Res6[record] = next_floats[6]\n next_doubles = struct.unpack(\">dd\", my_data[dataStartPoint + 34:dataStartPoint + 50])\n Res7[record] = next_doubles[0]\n Res8[record] = next_doubles[1]\n dataStartPoint += 50\n\n if num_spare_cols != 0:\n startRow = (2 * num_spare_cols) * record\n dataStartPoint += bytes_not_data_array - 4\n spareFmt = \">\"\n for i in range(num_spare_cols):\n spareFmt += \"f\"\n\n SpareDataArray[record] = np.array(\n struct.unpack(spareFmt, my_data[dataStartPoint:dataStartPoint+4*num_spare_cols]))\n\n UTCtime = TimeDiv10000 * 10000 + TimeRemainder\n diff_epoch_1904 = (\n datetime(1970, 1, 1) - datetime(1904, 1, 1)).total_seconds()\n UTCdatetime = np.array([\n datetime.utcfromtimestamp(x - diff_epoch_1904) for x in UTCtime])\n\n DateTimeWave = (dt - datetime(1904, 1, 1)).total_seconds() + TimeWave\n\n # Make an xarray dataset for SP2\n Flag = xr.DataArray(Flag, dims={'event_index': EventIndex})\n Res1 = xr.DataArray(Res1, dims={'event_index': EventIndex})\n Res5 = xr.DataArray(Res5, dims={'event_index': EventIndex})\n Res6 = xr.DataArray(Res6, dims={'event_index': EventIndex})\n Res7 = xr.DataArray(Res7, dims={'event_index': EventIndex})\n Res8 = xr.DataArray(Res8, dims={'event_index': EventIndex})\n Time = xr.DataArray(UTCdatetime, dims={'event_index': EventIndex})\n EventInd = xr.DataArray(EventIndex, dims={'event_index': EventIndex})\n DateTimeWaveUTC = xr.DataArray(UTCtime, dims={'event_index': EventIndex})\n DateTimeWave = xr.DataArray(DateTimeWave, dims={'event_index': EventIndex})\n TimeWave = xr.DataArray(TimeWave, dims={'event_index': EventIndex})\n my_ds = xr.Dataset({'time': Time, 'Flag': Flag, 'Res1': Res1, 'Res5': Res5,\n 'Res6': Res6, 'Res7': Res7, 'Res8': Res8, 'EventIndex': EventInd,\n 'DateTimeWaveUTC': DateTimeWaveUTC, 'TimeWave': TimeWave,\n 'DateTimeWave': DateTimeWave})\n\n for i in range(numChannels):\n temp_array = np.zeros((numRecords, 
numCols), dtype='int')\n for j in range(numRecords):\n k = i + j*numChannels\n temp_array[j] = DataWave[k]\n my_ds['Data_ch' + str(i)] = xr.DataArray(\n temp_array, dims={'event_index': EventIndex, 'columns': np.arange(0, 100, 1)})\n del my_data\n del DataWave\n return my_ds\n else:\n return None", "def t12_loadSMFile(self):\n print \"spectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"data/\" + self.filenameparser(filename)\n orgin_file = self.filenameparser(filename)\n self.t12_smfilename = filename\n self.t12_orginfilename = orgin_file", "def Read_Rcwa_Matlab(Path) : \n x,y=[],[]\n fs = open(Path, 'r') \n while 1: \n txt = fs.readline()\n if txt =='': \n break\n x.append(float(txt[0:25]))\n y.append(float(txt[29:-2])) \n fs.close()\n return x,y", "def load(self,filename = ''):\n if filename == '':\n filename = filedialog.askopenfilename()\n data = np.load(filename)\n\n try:\n self.amplifier = data['amplifier'].item()\n\n except:\n pass\n\n self.matchingnetwork = data['matchingnetwork'].item()\n self.hydro = data['hydro'].item()\n self.headerversion = data['headerversion'].item()\n self.depth = data['depth']\n self.samplingfreq = data['samplingfreq'].item()\n self.voltage = data['voltage']\n self.pulselength = data['pulselength'].item()\n self.pulserep = data['pulserep'].item()\n self.cfreq = data['cfreq']\n self.angle = data['angle']\n self.bursts = data['bursts'].item()\n self.hydoutput = data['hydoutput']\n self.txdr = data['txdr'].item()\n try:\n self.amplify = data['amplify'].item()\n except:\n print(\"Amplify variable not available\")\n try:\n self.operator = data['operator'].item()\n except:\n print(\"Operator variable not available\")", "def _read_calibration_params(self) -> np.ndarray:\n print('Loading calibration parameters...')\n cameras_data = []\n\n for c in range(self.num_cameras):\n camera = 'camera' + str(c).zfill(2) + '.json'\n print(' ', camera+'...')\n with open(os.path.join(self.cameras_dir, camera)) as f:\n data = json.load(f)\n\n # # Store data for each frame in numpy array\n # camera_params = np.empty(0)\n # for d in data:\n # frames = d['end_frame'] - d['start_frame']\n # del d['start_frame']\n # del d['end_frame']\n # cam = np.full(frames, d)\n # camera_params = np.append(camera_params, cam, axis=0)\n #\n cameras_data.append(data)\n return np.array(cameras_data, dtype=object)", "def _read_input_file(self):\n file_type = 'np.array'\n with open(self._file_properties['file_name'], 'r') as in_file:\n for line in in_file.readlines():\n if line[0:5] == '$$SOE':\n file_type = 'Horizons'\n break\n\n if not isfile(self._file_properties['file_name']):\n msg = 'Horizons files {:} does not exists.'\n message = msg.format(self._file_properties['file_name'])\n raise FileExistsError(message)\n if file_type == 'Horizons':\n self._read_horizons_file()\n else:\n (time, x, y, z) = np.loadtxt(\n self._file_properties['file_name'],\n usecols=(0, 1, 2, 3), unpack=True)\n self._time = time\n if int(astropy_version[0]) >= 4:\n self._xyz = SkyCoord(x=x, y=y, z=z,\n representation_type='cartesian')\n else:\n self._xyz = SkyCoord(x=x, y=y, z=z,\n representation='cartesian')", "def readin():\r\n nodes = np.loadtxt('Vnodes.txt', ndmin=2)\r\n mats = np.loadtxt('Vmater.txt', ndmin=2)\r\n elements = np.loadtxt('Veles.txt', ndmin=2)\r\n loads = np.loadtxt('Vloads.txt', ndmin=2)\r\n return nodes, mats, elements, loads", "def _process_data_file(self):\n \n with open(self.data_file, 'r') as f:\n self.description = f.readline().strip()\n data = np.loadtxt(self.data_file, 
skiprows=1)\n\n return data", "def legacy_load(self,filepath = '', amplifier = 'Amplifier'):\n\n if filepath == '':\n filepath = filedialog.askopenfilename()\n\n file1 = open(filepath)\n ctxt = file1.readline().rstrip()\n\n header = ''\n rowskip = 0\n while ctxt[0] == '#':\n header = header + ctxt[1:]\n ctxt = file1.readline().rstrip()\n rowskip += 1\n\n voltstr = header[2:-1]\n\n if voltstr.find(',') > 0:\n volts = np.fromstring(voltstr, sep=',')\n else:\n volts = np.fromstring(voltstr, sep='\\t')\n\n file1.close()\n data1 = np.loadtxt(filepath, skiprows=rowskip)\n self.hydoutput = data1\n self.voltage = volts\n self.amplifier = amplifier", "def __init__(self, inFilename):\n\n self._prmtopVersion=None\n self._flags=[]\n self._raw_format={}\n self._raw_data={}\n self._has_nbfix_terms = False\n\n with open(inFilename, 'r') as fIn:\n for line in fIn:\n if line[0] == '%':\n if line.startswith('%VERSION'):\n tag, self._prmtopVersion = line.rstrip().split(None, 1)\n elif line.startswith('%FLAG'):\n tag, flag = line.rstrip().split(None, 1)\n self._flags.append(flag)\n self._raw_data[flag] = []\n elif line.startswith('%FORMAT'):\n format = line.rstrip()\n index0=format.index('(')\n index1=format.index(')')\n format = format[index0+1:index1]\n try:\n m = FORMAT_RE_PATTERN.search(format)\n self._raw_format[self._flags[-1]] = (format, m.group(1), m.group(2), int(m.group(3)), m.group(4))\n except:\n # We couldn't parse the format, so just treat the whole line as a single string.\n self._raw_format[self._flags[-1]] = (format, 1, 'a', 80, '')\n elif line.startswith('%COMMENT'):\n continue\n elif self._flags \\\n and 'TITLE'==self._flags[-1] \\\n and not self._raw_data['TITLE']:\n self._raw_data['TITLE'] = line.rstrip()\n else:\n flag=self._flags[-1]\n (format, numItems, itemType,\n iLength, itemPrecision) = self._getFormat(flag)\n line = line.rstrip()\n for index in range(0, len(line), iLength):\n item = line[index:index+iLength]\n if item:\n self._raw_data[flag].append(item.strip())\n # See if this is a CHAMBER-style topology file, which is not supported\n # for creating Systems\n self.chamber = 'CTITLE' in self._flags", "def read_cbr_file(filename,INFO):\n \n \n # Defaul setting\n if not len(INFO):\n # Number of parameters\n INFO = {'nopars':32,\n 'latterhalf':0}\n \n with open(filename, 'rb') as fid:\n BD = np.fromfile(fid, np.float64)\n\n N = int(BD.shape[0]/INFO['nopars'])\n \n dims = [N,INFO['nopars']]\n \n PARS = BD.reshape(dims)\n \n return PARS", "def __readCONTINoutput(self):\n\n titleline = 'OBJ. FCTN. VARIANCE STD. DEV.'\n chunkTitle = re.compile('OBJ. FCTN. VARIANCE STD. DEV. 
')\n\n alldata = []\n\n with open(self.outputfile, 'r') as f:\n\n for line in f:\n if chunkTitle.search(line) is not None:\n\n alphadic = {}\n\n # gets the header\n alphaLine = next(f)\n if '*' in alphaLine:\n alphadic['marked'] = True\n\n alphaLine = alphaLine.replace('*', '')\n alphaParam = np.fromstring(alphaLine, sep=' ')\n\n # reduce the header line to string seperated text\n line = re.sub('\\s\\s\\s+', ' ', line).strip()\n for key, value in zip(line.split(' '), alphaParam):\n alphadic[key] = value\n # skip a line then get the data\n next(f)\n # alldata.append((alphadic, readblock(f)))\n alldata.append(\n (alphadic, readblock(f), readSummaryData(f)))\n\n # skip a line then get the data\n # print(next(f))\n\n return alldata", "def load_spe(filename):\n def read_at(data, pos, size, ntype):\n raw.seek(pos)\n return np.fromfile(raw, ntype, size)\n raw = open(filename, 'rb')\n xdim = np.int64(read_at(raw, 42, 1, np.int16)[0])\n ydim = np.int64(read_at(raw, 656, 1, np.int16)[0])\n arr = read_at(raw, 4100, xdim*ydim, np.uint16)\n arr = arr.reshape((ydim, xdim))\n print('data shape: {}'.format(np.shape(arr)))\n if np.shape(arr)[0] == 1:\n arr = arr[0]\n print('data shape: {}'.format(np.shape(arr)))\n return arr", "def read_parameters(self, entry=None):\n if entry:\n self.entry = entry\n with self.entry.nxfile:\n self.name = self.entry.nxroot.nxname + \"/\" + self.entry.nxname\n if 'unit_cell' in self.entry['sample']:\n lattice_parameters = self.read_parameter('sample/unit_cell')\n if lattice_parameters is not None:\n self.a, self.b, self.c = lattice_parameters[:3]\n self.alpha, self.beta, self.gamma = lattice_parameters[3:]\n elif 'unit_cell_abc' in self.entry['sample']:\n lattice_parameters = self.read_parameter(\n 'sample/unit_cell_abc')\n if lattice_parameters is not None:\n self.a, self.b, self.c = lattice_parameters\n lattice_parameters = self.read_parameter(\n 'sample/unit_cell_alphabetagamma')\n if lattice_parameters is not None:\n self.alpha, self.beta, self.gamma = lattice_parameters\n else:\n self.a = self.read_parameter('sample/unitcell_a', self.a)\n self.b = self.read_parameter('sample/unitcell_b', self.b)\n self.c = self.read_parameter('sample/unitcell_c', self.c)\n self.alpha = self.read_parameter(\n 'sample/unitcell_alpha', self.alpha)\n self.beta = self.read_parameter(\n 'sample/unitcell_beta', self.beta)\n self.gamma = self.read_parameter(\n 'sample/unitcell_gamma', self.gamma)\n self.formula = self.read_parameter('sample/chemical_formula',\n self.formula)\n self.space_group = self.read_parameter(\n 'sample/space_group', self.space_group)\n self.laue_group = self.read_parameter(\n 'sample/laue_group', self.laue_group)\n self.wavelength = self.read_parameter(\n 'instrument/monochromator/wavelength', self.wavelength)\n self.distance = self.read_parameter('instrument/detector/distance',\n self.distance)\n self.yaw = self.read_parameter('instrument/detector/yaw', self.yaw)\n self.pitch = self.read_parameter('instrument/detector/pitch',\n self.pitch)\n self.roll = self.read_parameter(\n 'instrument/detector/roll', self.roll)\n self.xc = self.read_parameter('instrument/detector/beam_center_x',\n self.xc)\n self.yc = self.read_parameter('instrument/detector/beam_center_y',\n self.yc)\n self.xd = self.read_parameter('instrument/detector/translation_x',\n self.xd)\n self.yd = self.read_parameter('instrument/detector/translation_y',\n self.yd)\n self.frame_time = self.read_parameter(\n 'instrument/detector/frame_time', self.frame_time)\n self.shape = self.read_parameter(\n 
'instrument/detector/shape', self.shape)\n phi = self.read_parameter('instrument/goniometer/phi', self.phi)\n if isinstance(phi, np.ndarray) and len(phi) > 1:\n self.phi = phi[0]\n self.phi_step = phi[1] - phi[0]\n else:\n self.phi = phi\n try:\n self.phi_step = self.read_parameter(\n 'instrument/goniometer/phi', self.phi, attr='step')\n except Exception:\n pass\n self.chi = self.read_parameter(\n 'instrument/goniometer/chi', self.chi)\n self.omega = self.read_parameter('instrument/goniometer/omega',\n self.omega)\n if 'instrument/goniometer' in self.entry:\n if 'theta' in self.entry['instrument/goniometer']:\n self.theta = self.read_parameter(\n 'instrument/goniometer/theta', self.theta)\n elif 'goniometer_pitch' in self.entry['instrument/goniometer']:\n self.theta = self.read_parameter(\n 'instrument/goniometer/goniometer_pitch', self.theta)\n elif 'gonpitch' in self.entry['instrument/goniometer']:\n self.theta = self.read_parameter(\n 'instrument/goniometer/gonpitch', self.theta)\n self.symmetry = self.read_parameter('sample/unit_cell_group',\n self.symmetry)\n self.centring = self.read_parameter('sample/lattice_centring',\n self.centring)\n self.xp = self.read_parameter('peaks/x')\n self.yp = self.read_parameter('peaks/y')\n self.zp = self.read_parameter('peaks/z')\n self.polar_angle = self.read_parameter('peaks/polar_angle')\n self.azimuthal_angle = self.read_parameter('peaks/azimuthal_angle')\n self.intensity = self.read_parameter('peaks/intensity')\n self.pixel_size = self.read_parameter(\n 'instrument/detector/pixel_size', self.pixel_size)\n self.pixel_mask = self.read_parameter(\n 'instrument/detector/pixel_mask')\n self.pixel_mask_applied = self.read_parameter(\n 'instrument/detector/pixel_mask_applied')\n self.rotation_angle = self.read_parameter('peaks/rotation_angle')\n self.primary = self.read_parameter('peaks/primary_reflection')\n self.secondary = self.read_parameter('peaks/secondary_reflection')\n self.Umat = self.read_parameter(\n 'instrument/detector/orientation_matrix')\n if isinstance(self.polar_angle, np.ndarray):\n try:\n self.set_polar_max(np.sort(self.polar_angle)[200] + 0.1)\n except IndexError:\n self.set_polar_max(self.polar_angle.max())\n else:\n self.set_polar_max(10.0)\n self.Qh = self.read_parameter('transform/Qh')\n self.Qk = self.read_parameter('transform/Qk')\n self.Ql = self.read_parameter('transform/Ql')\n self.initialize_peaks()", "def t9_loadSMFile(self):\n print \"spectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"data/\" + self.filenameparser(filename)\n orgin_file = self.filenameparser(filename).split('.')[0]\n self.t9_smfilename = filename\n self.t9_orginfilename = orgin_file", "def parse_params(self,tokenized_lines):\n\n # extract key-value pairs\n conversions = {\n # Space\n \"nuclide\" : tools.tuple_of(int), # use tuple so parameter is hashable when used as analysis key\n \"A\" : tools.singleton_of(int),\n \"Nsigma\" : tools.singleton_of(float),\n \"Nsigmamax\" : tools.singleton_of(int),\n \"N1v\" : tools.singleton_of(int),\n \"Nmax\" : tools.singleton_of(int),\n # Interaction\n \"interaction\" : tools.singleton_of(str),\n \"use_coulomb\" : tools.singleton_of(tools.bool_from_str),\n # Relative observables\n \"observable_names\" : tools.list_of(str),\n # Calculation\n \"hw\" : tools.singleton_of(float)\n }\n key_value_dict = tools.extract_key_value_pairs(\n tokenized_lines,conversions\n )\n\n # legacy support: force interaction to \"JISP16\" for early runs where\n # interaction field was provided as reserved field but not 
set to \"JISP16\"\n if (\"interaction\" in key_value_dict):\n if (key_value_dict[\"interaction\"] == \"RESERVED\"):\n key_value_dict[\"interaction\"]=\"JISP16\"\n\n # provide \"coulomb\" as preferred field name to match mfdn results analysis\n if (\"use_coulomb\" in key_value_dict):\n key_value_dict[\"coulomb\"] = key_value_dict[\"use_coulomb\"]\n \n # update to params dictionary\n self.params.update(key_value_dict)", "def postIdealizedAnalysis(inpath, outpath, member,\n refpath='/lustre/research/bancell/aucolema/HWT2016runs/2016050800/wrfoutREF'):\n # SENSvals file naming conventions\n sensval_varstrings = [\"GPH_300\", \"GPH_500\", \"GPH_700\", \"GPH_850\", \"SKIP\",\n \"T_300\", \"T_500\", \"T_700\", \"T_850\", \"T_925\",\n \"U_300\", \"U_500\", \"U_700\", \"U_850\", \"U_925\",\n \"V_300\", \"V_500\", \"V_700\", \"V_850\", \"V_925\",\n \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\",\n \"SKIP\", \"SKIP\", \"Q_850\", \"SKIP\", \"SLP\", \"T2\",\n \"TD2\", \"U10\", \"V10\"]\n # Post-processed new file naming conventions\n sensstringslist = [\"300 hPa GPH\",\"500 hPa GPH\",\"700 hPa GPH\",\n \"850 hPa GPH\",\"925 hPa GPH\",\"300 hPa T\",\"500 hPa T\",\n \"700 hPa T\",\"850 hPa T\",\"925 hPa T\",\"300 hPa U-Wind\",\n \"500 hPa U-Wind\",\"700 hPa U-Wind\",\"850 hPa U-Wind\",\n \"925 hPa U-Wind\",\"300 hPa V-Wind\",\"500 hPa V-Wind\",\n \"700 hPa V-Wind\",\"850 hPa V-Wind\",\"925 hPa V-Wind\",\n \"300 hPa Dewpt\", \"500 hPa Dewpt\", \"700 hPa Dewpt\",\n \"850 hPa Dewpt\", \"925 hPa Dewpt\", \"300 hPa Q\",\n \"500 hPa Q\", \"700 hPa Q\", \"850 hPa Q\", \"925 hPa Q\",\n \"SLP\",\"2m Temp\",\"2m Dewpt\",\n \"10m U-Wind\",\"10m V-Wind\"]\n\n # Get more dimensions/geographical info\n wrf_d1 = Dataset(refpath)\n lons, lats = wrf_d1.variables['XLONG'][0], wrf_d1.variables['XLAT'][0]\n wrf_idim = len(lons[0,:])\n wrf_jdim = len(lats[:,0])\n\n # Write interpolated variables to netCDF\n new_analysis = Dataset(outpath, \"w\", format=\"NETCDF4\")\n new_analysis.createDimension('lat', wrf_jdim)\n new_analysis.createDimension('lon', wrf_idim)\n new_analysis.createDimension('time', None)\n xlat = new_analysis.createVariable(\"XLAT\", float, dimensions=('lat','lon'))\n xlat[:,:] = lats\n xlon = new_analysis.createVariable(\"XLONG\", float, dimensions=('lat','lon'))\n xlon[:,:] = lons\n\n # Open dataset and start pulling member fields\n member_fields = np.zeros((len(sensval_varstrings), wrf_jdim, wrf_idim))\n sensvar_dat = Dataset(inpath)\n for ind, var in enumerate(sensval_varstrings):\n # print(\"SENSvals variable:\", var, \"New variable string\", sensstringslist[ind])\n if var != \"SKIP\":\n member_fields[ind] = sensvar_dat[var][member-1][:]\n newvar = new_analysis.createVariable(\n sensstringslist[ind].replace(\" \",\"_\"),\n member_fields[ind].dtype,\n dimensions=('lat','lon'))\n newvar[:,:] = member_fields[ind]\n new_analysis.close()\n return", "def postIdealizedAnalysis(inpath, outpath, member,\n refpath='/lustre/research/bancell/aucolema/HWT2016runs/2016050800/wrfoutREF'):\n # SENSvals file naming conventions\n sensval_varstrings = [\"GPH_300\", \"GPH_500\", \"GPH_700\", \"GPH_850\", \"SKIP\",\n \"T_300\", \"T_500\", \"T_700\", \"T_850\", \"T_925\",\n \"U_300\", \"U_500\", \"U_700\", \"U_850\", \"U_925\",\n \"V_300\", \"V_500\", \"V_700\", \"V_850\", \"V_925\",\n \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\", \"SKIP\",\n \"SKIP\", \"SKIP\", \"Q_850\", \"SKIP\", \"SLP\", \"T2\",\n \"TD2\", \"U10\", \"V10\"]\n # Post-processed new file naming conventions\n sensstringslist = [\"300 hPa 
GPH\",\"500 hPa GPH\",\"700 hPa GPH\",\n \"850 hPa GPH\",\"925 hPa GPH\",\"300 hPa T\",\"500 hPa T\",\n \"700 hPa T\",\"850 hPa T\",\"925 hPa T\",\"300 hPa U-Wind\",\n \"500 hPa U-Wind\",\"700 hPa U-Wind\",\"850 hPa U-Wind\",\n \"925 hPa U-Wind\",\"300 hPa V-Wind\",\"500 hPa V-Wind\",\n \"700 hPa V-Wind\",\"850 hPa V-Wind\",\"925 hPa V-Wind\",\n \"300 hPa Dewpt\", \"500 hPa Dewpt\", \"700 hPa Dewpt\",\n \"850 hPa Dewpt\", \"925 hPa Dewpt\", \"300 hPa Q\",\n \"500 hPa Q\", \"700 hPa Q\", \"850 hPa Q\", \"925 hPa Q\",\n \"SLP\",\"2m Temp\",\"2m Dewpt\",\n \"10m U-Wind\",\"10m V-Wind\"]\n\n # Get more dimensions/geographical info\n wrf_d1 = Dataset(refpath)\n lons, lats = wrf_d1.variables['XLONG'][0], wrf_d1.variables['XLAT'][0]\n wrf_idim = len(lons[0,:])\n wrf_jdim = len(lats[:,0])\n\n # Write interpolated variables to netCDF\n new_analysis = Dataset(outpath, \"w\", format=\"NETCDF4\")\n new_analysis.createDimension('lat', wrf_jdim)\n new_analysis.createDimension('lon', wrf_idim)\n new_analysis.createDimension('time', None)\n xlat = new_analysis.createVariable(\"XLAT\", float, dimensions=('lat','lon'))\n xlat[:,:] = lats\n xlon = new_analysis.createVariable(\"XLONG\", float, dimensions=('lat','lon'))\n xlon[:,:] = lons\n\n # Open dataset and start pulling member fields\n member_fields = np.zeros((len(sensval_varstrings), wrf_jdim, wrf_idim))\n sensvar_dat = Dataset(inpath)\n for ind, var in enumerate(sensval_varstrings):\n # print(\"SENSvals variable:\", var, \"New variable string\", sensstringslist[ind])\n if var != \"SKIP\":\n member_fields[ind] = sensvar_dat[var][member-1][:]\n newvar = new_analysis.createVariable(\n sensstringslist[ind].replace(\" \",\"_\"),\n member_fields[ind].dtype,\n dimensions=('lat','lon'))\n newvar[:,:] = member_fields[ind]\n new_analysis.close()\n return", "def read_from_ascii(self, filename):\n self.ascii_filename = filename\n # read file content into a string\n f=open(filename,'r')\n file_str=f.read()\n f.close()\n # make dictionary with file content\n reg_exp_data_groups=re.compile(r'^#>>(\\w+):.*\\n',re.M)\n file_dict=self.make_data_dict_from_str(reg_exp_data_groups,file_str)\n # read arrays ------------------------------\n self.x=np.loadtxt(StringIO.StringIO(file_dict['x']))\n self.p=np.loadtxt(StringIO.StringIO(file_dict['p']))\n self.fmci_XP=np.loadtxt(StringIO.StringIO(file_dict['XP']))\n # regular expression for extracting parameter=value\n reg_exp_param_val=re.compile(r'\\n*(\\w+)=',re.M)\n # read params_physics -----------------------\n params_physics_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_physics'])\n self.name=self.__get_particle_name(params_physics_dict['particle'])\n self.time=float(params_physics_dict['time'])\n # read params_TDC ---------------------------\n params_TDC_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_TDC'])\n self.calc_id=params_TDC_dict['calc_id']\n self.i_ts=int(params_TDC_dict['i_ts'])", "def _readCharAuxVariablesHeaderSection(self):\n self.NAUXV = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n self.NAUXC = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n nonCharAuxVars = self.NAUXV - self.NAUXC\n if self.NAUXV > 0:\n self.ASCAL = nappy.utils.text_parser.readItemsFromUnknownLines(self.file, nonCharAuxVars, float)\n self.AMISS = nappy.utils.text_parser.readItemsFromUnknownLines(self.file, nonCharAuxVars, float)\n self.LENA = nappy.utils.text_parser.readItemsFromUnknownLines(self.file, self.NAUXC, int)\n for i in 
range(nonCharAuxVars):\n self.LENA.insert(0, None)\n self.AMISS = self.AMISS + nappy.utils.text_parser.readItemsFromUnknownLines(self.file, self.NAUXC, str) \n self.ANAME = nappy.utils.text_parser.readItemsFromLines(self._readLines(self.NAUXV), self.NAUXV, str)", "def _readAuxVariablesHeaderSection(self):\n self.NAUXV = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n if self.NAUXV > 0: \n self.ASCAL = nappy.utils.text_parser.readItemsFromUnknownLines(self.file, self.NAUXV, float)\n self.AMISS = nappy.utils.text_parser.readItemsFromUnknownLines(self.file, self.NAUXV, float)\n self.ANAME = nappy.utils.text_parser.readItemsFromLines(self._readLines(self.NAUXV), self.NAUXV, str)", "def file_read(file_name):\n \n #open specified file in read mode\n in_file = open(file_name, \"r\")\n \n #create data lists\n sp_length_v3 = []\n sp_period_v3 = [] \n\n #save header to string and split into list\n header_string = in_file.readline()\n header_v3 = header_string.split()\n \n #save revelent data to respective lists\n for line in in_file:\n values = line.split()\n sp_length_v3.append(float(values[1]))\n sp_period_v3.append(float(values[2]))\n \n #close the file\n in_file.close()\n \n #return 3D lists of lists containing data\n ans = [sp_length_v3, sp_period_v3, header_v3]\n \n return ans", "def __init__(self, file_name):\r\n self.file_name = file_name\r\n \r\n try: # Catch if name is wrong\r\n with open(file_name, 'rb') as f:\r\n self.data= self.read_file(f)\r\n \r\n except IOError:\r\n print \"Could not read file:\", file_name\r\n\r\n self.prom = []\r\n self.stddev= []\r\n self.out_of_3stddev = []", "def readVP(self,species): \n f = open('VPparams.txt', 'rU')\n lines = f.readlines()\n f.close()\n \n parsing = False\n for i in np.arange(len(lines)):\n if lines[i].startswith(species):\n parsing = True\n else:\n parsing = False\n if parsing:\n data = lines[i].split()\n \n lnC, L0, Rv, da, db = data[1:len(data)]\n self.lnC, self.L0, self.Rv, self.da, self.db = \\\n float(lnC), float(L0), float(Rv), float(da), float(db)", "def readmli(datafile, par, sub_im, cr):\n ct = int(par['range_samples']) * int(par['azimuth_lines'])\n\n dt = np.dtype('>f4') # GAMMA files are big endian 32 bit float\n\n d = np.fromfile(datafile, dtype=dt, count=ct)\n\n d = d.reshape(int(par['azimuth_lines']), int(par['range_samples']))\n #print(\"Number of elements and size of the array is\",d.size, d.shape)\n #d[d==0]= np.nan # convert zeros to nan\n return d[cr[1]-sub_im:cr[1]+sub_im,cr[0]-sub_im:cr[0]+sub_im]", "def read_settings():\n # open file for reading\n ifile = open(infilename, 'r')\n # skip comment line\n ifile.readline()\n # char-file name\n line = ifile.readline()\n line = line.split()\n charfile = line[-1]\n # header-file name\n line = ifile.readline()\n line = line.split()\n headerfile = line[-1]\n # output folder\n line = ifile.readline()\n line = line.split()\n ofname = line[-1]\n # length of the filaments\n line = ifile.readline()\n line = line.split()\n nfil = int(line[-1])\n # number of snapshots to skip\n line = ifile.readline()\n line = line.split()\n nskip = int(line[-1])\n # number of bins\n line = ifile.readline()\n line = line.split()\n nbin = int(line[-1])\n # maximum cutoff\n line = ifile.readline()\n line = line.split()\n rmax = float(line[-1])\n # overall number of snapshots to use in the analysis\n line = ifile.readline()\n line = line.split()\n ntotal = int(line[-1])\n # close file\n ifile.close()\n # return input values\n return charfile, headerfile, ofname, nfil, nskip, nbin, rmax, 
ntotal", "def readblock(fileObj):\n data = []\n\n p = re.compile('ORDINATE')\n q = re.compile('0LINEAR COEFFICIENTS')\n for line in fileObj:\n if q.search(line) is not None:\n break\n if p.search(line) is None:\n dataContent = line[0:31]\n dataContent = dataContent.replace('D', 'E')\n datarow = list(map(float, dataContent.split()))\n data.append(datarow)\n\n return np.array(data)", "def read_calibration_file(\n filename, return_integration_time=False,\n create_spectrum=True\n):\n assert isinstance(filename, str)\n area_texts = {\n 'Collection-area(cm^2)': ('area', 'cm**2'),\n 'Fiber(micron)': ('diameter', 'micrometer'),\n 'Fiber(cm)': ('diameter', 'cm'),\n 'Collection-area(um^2)': ('area', 'micrometer**2')\n }\n integration_time_texts = {\n 'Int.Time(usec)': 'microsecond',\n 'IntegrationTime(sec)': 'second',\n 'Int.Time(sec)': 'second',\n 'IntegrationTime(usec)': 'microsecond',\n }\n\n area = None\n integration_time = None\n cal_data = np.loadtxt(filename, skiprows=9)\n\n with open(filename, 'r') as f:\n # just check the first nine lines\n for n in range(9):\n line = next(f)\n # removes all spaces\n line = line.replace(' ', '').replace(':', '')\n if area is None:\n for area_text, area_type in area_texts.items():\n if area_text in line:\n area = float(line.replace(area_text, ''))\n break\n\n if integration_time is None:\n for it_text, it_units in integration_time_texts.items():\n if it_text in line:\n integration_time = float(line.replace(it_text, ''))\n break\n\n if (area is None) or (integration_time is None):\n raise DreyeError(\"Could not find area or \"\n \"integration time in lamp file.\")\n\n if area_type[0] == 'diameter':\n area = area * ureg(area_type[1])\n area = np.pi * (area/2) ** 2\n area = area.to('cm**2')\n elif area_type[0] == 'area':\n area = (area * ureg(area_type[1])).to('cm**2')\n else:\n raise DreyeError(\"Area type {area_type} not recognized.\")\n\n integration_time = (integration_time * ureg(it_units)).to('s')\n\n if create_spectrum:\n cal = CalibrationSpectrum(\n values=cal_data[:, 1],\n domain=cal_data[:, 0],\n area=area\n )\n if return_integration_time:\n return cal, integration_time\n else:\n return cal\n elif return_integration_time:\n return cal_data[:, 0], cal_data[:, 1], area, integration_time\n\n return cal_data[:, 0], cal_data[:, 1], area", "def load_subsmatrix(self, matrixfile):\n self.subs_matrix = {}\n with open(matrixfile, 'rt') as rd:\n cols = []\n for line in rd:\n line = line.rstrip(os.linesep)\n if line.startswith(('#', '\\n')):\n continue\n item = line.strip().split()\n if re.search(r'^\\s+', line):\n cols = item\n else:\n for i in range(1, len(item)):\n self.subs_matrix.setdefault(item[0], {}).setdefault(\n cols[i-1], int(item[i]))", "def load_event_properties(experiment):\n return np.loadtxt('../Slip_Property_Data/%s_event_properties.txt'%experiment,delimiter=',',skiprows=1)", "def load_event_properties(experiment):\n return np.loadtxt('../Slip_Property_Data/%s_event_properties.txt'%experiment,delimiter=',',skiprows=1)", "def get_params(file):\n import shlex\n f = open(file)\n for line in f:\n text = shlex.split(line)\n if (\"InitialTime\" in text):\n tval = float(text[len(text)-1])\n elif (\"DensityUnits\" in text):\n dUnit = float(text[len(text)-1])\n elif (\"TimeUnits\" in text):\n tUnit = float(text[len(text)-1])\n elif (\"LengthUnits\" in text):\n lUnit = float(text[len(text)-1])\n return [tval, dUnit, tUnit, lUnit]", "def _readVariablesHeaderSection(self):\n self.NV = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n 
self.VSCAL = nappy.utils.text_parser.readItemsFromUnknownLines(self.file, self.NV, float)\n self.VMISS = nappy.utils.text_parser.readItemsFromUnknownLines(self.file, self.NV, float)\n self.VNAME = nappy.utils.text_parser.readItemsFromLines(self._readLines(self.NV), self.NV, str)", "def readSigfile(filename):\r\n gtTrace = []\r\n gtTime = []\r\n gtHR = []\r\n with open(filename,'r') as f :\r\n x = f.readlines()\r\n s = x[0].split(' ')\r\n s = list(filter(lambda a:a != '',s))\r\n gtTrace = np.array(s).astype(np.float64)\r\n\r\n t = x[2].split(' ')\r\n t = list(filter(lambda a: a != '' ,t))\r\n gtTime = np.array(t).astype(np.float64)\r\n\r\n hr = x[1].split(' ')\r\n hr = list(filter(lambda a: a != '' ,hr))\r\n gtHR = np.array(hr).astype(np.float64)\r\n\r\n data = np.array(gtTrace)\r\n time = np.array(gtTime)\r\n hr = np.array(gtHR)\r\n\r\n return data,hr", "def importParameterBoundaryFile(paramfilename):\n try:\n infile = open(paramfilename, \"r\")\n except IOError:\n\t print \"Unable to open file %s\" % (paramfilename)\n\t raise IOError(\"Unable to open parameter boundary file %s\" % (paramfilename))\n lines = infile.readlines()\n infile.close()\n\n # Parse\n paramdict = {}\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n continue\n elif line[0] == '#':\n continue\n else:\n terms = line.split()\n name = terms[0]\n value = float(terms[1])\n parmin = float(terms[2])\n parmax = float(terms[3])\n stepsize = float(terms[4])\n \n paramdict[name] = [value, parmin, parmax, stepsize]\n # ENDIF\n # ENDFOR\n\n return paramdict", "def read_multinest_file(shower_name):\n\twith open(fit_dir+'fit_'+shower_name+'.out') as file:\n\t\tline = file.readline().split()\n\t\tslope = 1-float(line[0])\n\t\tslope_err_plus = -float(line[1])\n\t\tslope_err_minus = float(line[2])\n\t\ty_int = float(file.readline().split()[0])\n\treturn slope, slope_err_plus, slope_err_minus, y_int", "def get_params(file):\n import shlex\n f = open(file)\n for line in f:\n text = shlex.split(line)\n if (\"DensityUnits\" in text):\n dUnit = float(text[len(text)-1])\n elif (\"TimeUnits\" in text):\n tUnit = float(text[len(text)-1])\n elif (\"LengthUnits\" in text):\n lUnit = float(text[len(text)-1])\n vUnit = lUnit/tUnit\n return [dUnit, tUnit, lUnit, vUnit]", "def loadconfig(file, name , conf, subarray=DEFAULT) :\n # do we really need multiSubarray for this?\n if subarray == BOTH:\n raise Exception, \"Can't do loadconfig on BOTH subarrays\"\n # is the following true?\n if subarray == DEFAULT and subarrayNo > 2:\n raise Exception, \"Must call loadconfig only on science subarrays\" \n # do we need a wait(SIGNALPATH) construct?\n\n try :\n multiSubarray('loadConfiguration', subarray, file, name, conf)\n except Exception, ex:\n print(ex.errorMsg)", "def parse_txt_file(txtfile):\n array = np.genfromtxt(txtfile)\n return array", "def read_file(self):\n\n\t\twith open(self.filename, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tif len(line)>1:\n\t\t\t\t\tlenght_value,array_values = line.split(';')\n\t\t\t\t\tlist_values = [int(x) for x in array_values.split(',')]\n\t\t\t\t\tprint self.get_arraysurdit(list_values)", "def read_prop(self, fname, prop, add=True, mult=1):\r\n print('Reading ' + prop + ' input')\r\n typeVal = None\r\n val = 0\r\n with open(fname, \"r\") as fp:\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == prop:\r\n if len(item) >= 2:\r\n if item[1] == \"*CON\":\r\n val = float(item[2])\r\n typeVal = '*CON'\r\n elif item[1] == '*EQUALSI' or item[1] == 'EQUALSI':\r\n attr_I = prop[:-1] 
+ 'I'\r\n # Change 'PERMJ' to be the keyword that identifies the end of attribute section\r\n data = self.read_prop(fname, attr_I, add=False, mult=mult)\r\n if len(item) == 4:\r\n op = item[2]\r\n if op == '*':\r\n data *= float(item[3])\r\n elif op == '/':\r\n data /= float(item[3])\r\n elif op == '+':\r\n data += float(item[3])\r\n elif op == '-':\r\n data -= float(item[3])\r\n elif item[1] == 'ALL':\r\n typeVal = 'ALL'\r\n break\r\n\r\n if typeVal == 'ALL':\r\n data = []\r\n count = 0\r\n for line in fp:\r\n item = line.split()\r\n for attr in item:\r\n if \"*\" in attr:\r\n item = attr.split(\"*\")\r\n for i in range(0, int(item[0])):\r\n data.append(float(item[1]))\r\n count += 1\r\n else:\r\n data.append(float(attr))\r\n count += 1\r\n # If true, all values have been read\r\n if count == self.size[0] * self.size[1] * self.size[2]:\r\n data = np.array(data)\r\n data = np.reshape(data, (self.size[2], self.size[1], self.size[0]), order=\"C\")\r\n break\r\n elif typeVal == '*CON':\r\n data = np.full((self.size[2], self.size[1], self.size[0]), val)\r\n\r\n if add:\r\n self.add_data(data, prop)\r\n self.out_props[prop] = data\r\n return data", "def readParamFromFile(self, file, sect):\r\n f = configparser.ConfigParser()\r\n f.read(file)\r\n # s = f.sections()\r\n # print(s)\r\n\r\n self.m_param = dict(f.items(sect))\r\n print(self.m_param)\r\n # print(len(self.m_param))\r", "def read_Hrrr(filename, parameters = [''],max = False):\n \n myfile = pygrib.open(filename) \n parameterlist = ['Geopotential Height','Temperature','Relative humidity','Dew point temperature',\n 'Specific humidity','Vertical velocity','U component of wind','V component of wind',\n 'Absolute vorticity','Cloud mixing ratio','Cloud Ice','Rain mixing ratio','Snow mixing ratio',\n 'Graupel (snow pellets)'] \n \n if parameters != ['']:\n for i in range(len(parameters)):\n x = parameterlist.count(parameters[i])\n if x == 0: \n print 'requested parameter not in list'\n print parameters[i] \n return 0\n parameterlist = parameters[:]\n \n \n data = []\n grb = myfile.select(name = parameterlist[0]) \n grb_cube = grb_to_grid(grb)\n dataloc = np.array(grb[0].latlons())\n datah = grb_cube['levels']\n units = []\n \n for p in parameterlist:\n grb = myfile.select(name = p)\n grb_cube = grb_to_grid(grb)\n if not max:\n data.append(grb_cube['data'])\n else:\n data.append(grb_cube['data'].max(axis=0))\n units.append(grb_cube['units'])\n \n return [data,parameterlist,datah,dataloc,units]", "def _read_params_txt(self) -> dict:\n df = pd.read_csv(self.file_path, sep=\" \", header=None, index_col=0).T\n\n sources_info = {\n \"sample_rate\": float(df[\"samplerate\"].iloc[0]),\n \"data_format\": df[\"dataformat\"].str.replace(\"'\", \"\").iloc[0],\n \"n_samples\": None,\n \"path\": self.file_path,\n }\n\n return sources_info", "def Read_Spectrum(Path,borne1 = 0,borne2 = 0) :\n x,y=[],[]\n fs = open(Path, 'r')\n #print('Open new fic') \n#index_array = 0\n while 1: \n txt = fs.readline()\n #print(txt)\n if ((txt =='')|(txt == '\\r\\n')): \n break\n #print(txt)\n ii=-1\n while 1: # on cherche le premier espace qui limite le premier nombre\n ii = ii+1\n #print(ii)\n if ((txt[ii] == ' ') |(txt[ii] == '\\t')):\n break\n \n x.append(float(txt[0:ii]))\n y.append(float(txt[ii:])) \n# if len(txt) == 21 : #nu >= 10000 cm-1\n# x.append(float(txt[0:11]))\n# y.append(float(txt[11:]))\n# elif len(txt) == 20 : #nu >= 1000 cm-1\n# x.append(float(txt[0:10]))\n# y.append(float(txt[10:]))\n# elif len(txt) == 19 : #nu >= 100 cm-1\n# x.append(float(txt[0:9]))\n# 
y.append(float(txt[9:]))\n# elif len(txt) == 18 : #nu >= 10 cm-1\n# x.append(float(txt[0:8]))\n# y.append(float(txt[8:]))\n# elif len(txt) == 17 : #nu >= 1 cm-1\n# x.append(float(txt[0:7]))\n# y.append(float(txt[7:]))\n\n #x[index_array],y[index_array] = float(txt[0:9]),float(txt[10:17])\n #index_array = index_array+1\n \n fs.close()\n x = np.array(x)\n y = np.array(y)\n if ((borne1 == 0) & (borne2 == 0)) :\n pass \n else :\n index_ok = ((x<borne2) & (x>borne1))\n x = x[index_ok]\n y = y[index_ok]\n\n return x,y", "def read_and_check_valid_params(instrument, file_header):\n non_valid_params = []\n file_loc = \"/grp/hst/cdbs/tools/jwst/valid_params/\" + change_style(instrument) + \"_valid_params.csv\"\n\n datetime1 = re.compile(\"([1][9]|([2][0-1]))\\d{2}-([0][0-9]|[1][0-2])-([0-2][0-9]|[3][0-1])T([0-1][0-9]|[2][0-3]):[0-5][0-9]:[0-5][0-9]\")\n datetime2 = re.compile(\"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\")\n inflight_datetime = re.compile(\"INFLIGHT ([1][9]|([2][0-1]))\\d{2}-([0][0-9]|[1][0-2])-([0-2][0-9]|[3][0-1]) ([1][9]|([2][0-1]))\\d{2}-([0][0-9]|[1][0-2])-([0-2][0-9]|[3][0-1])\")\n\n with open(file_loc, 'rb') as csvfile:\n keyreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in keyreader:\n if row[0] in file_header:\n #In the cases of SUBSTRT or SUBSIZE\n if type(file_header[row[0]]) is int:\n row[1:] = [int(x) for x in row[1:]]\n #If OR is present in value\n if type(file_header[row[0]]) is not int and \"|\" in file_header[row[0]]:\n values = file_header[row[0]].split(\"|\")\n if values[0] in row[1:]:\n pass\n else:\n non_valid_params.append((values[0], row[0]))\n\n if values[1] in row[1:]:\n pass\n else:\n non_valid_params.append((values[1], row[0]))\n #Valid value\n elif (type(file_header[row[0]]) is int or \"|\" not in file_header[row[0]]) \\\n and file_header[row[0]] in row[1:]:\n pass\n #Check USEAFTER\n elif row[0] == 'USEAFTER':\n if re.match(datetime1, file_header[row[0]]):\n pass\n elif re.match(datetime2, file_header[row[0]]):\n print (\"Correct format but inaccurate dates in USEAFTER\")\n non_valid_params.append((file_header[row[0]], row[0]))\n else:\n non_valid_params.append((file_header[row[0]], row[0]))\n #Check PEDIGREE\n elif row[0] == 'PEDIGREE':\n valid_options = ['SIMULATION', 'GROUND', 'DUMMY']\n if (file_header[row[0]] in valid_options) or re.match(inflight_datetime, file_header[row[0]]):\n pass\n else:\n non_valid_params.append((file_header[row[0]], row[0]))\n #Check's to see if certain headers are not empty\n elif row[0] in ['AUTHOR', 'DESCRIP', 'HISTORY']:\n if file_header[row[0]] == \"\":\n non_valid_params.append((file_header[row[0]], row[0]))\n #Not a valid value\n else:\n non_valid_params.append((file_header[row[0]], row[0]))\n else:\n pass\n if not non_valid_params:\n print (\"All parameters are valid\")\n else:\n print (\"Non-valid paramters (Format (Non-valid value, Header located in)): {}\".format(non_valid_params))", "def read_vmdas(self,):\n fd = self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = 
tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76", "def read_LD(read_fn):\n f = open(read_fn,'r')\n SIGMA = []\n array = []\n for line in f:\n line = line.strip()\n array = line.split()\n SIGMA.append(array)\n return SIGMA", "def loadFromFITSFile(filename, maxRows=0):\n\tultracam = False\n\tultraspec = False\n\tinputFile = astropy.io.fits.open(filename)\n\tfileInfo = inputFile.info()\n\t\n\tprint fileInfo\n\tprint len(inputFile)\n\tif len(inputFile)==4:\n\t\tprint \"We have an ULTRACAM file...\"\n\t\tultracam = True\n\tif len(inputFile)==2:\n\t\tprint \"We have an ULTRASPEC file...\"\n\t\tultraspec = True\n\t\n\tif ultraspec:\n\t\tCCDs = ['CCD 1']\n\tif ultracam:\n\t\tCCDs = ['CCD 1', 'CCD 2', 'CCD 3']\n\t\n\t\n\theaderBlock = str(inputFile[0].header)\n\t\n\t# Get some header info\n\ttargetName = generalUtils.getKeyValueFromFITSHeader('target', headerBlock)\n\tfilterName = generalUtils.getKeyValueFromFITSHeader('filter', headerBlock)\n\trunName = generalUtils.getKeyValueFromFITSHeader('Data file name', headerBlock, terminator=' ')\n\t\n\tfor CCD in CCDs:\n\t\theaders = inputFile[CCD].header\n\t\tdata = inputFile[CCD].data\n\t\tcolumns = inputFile[CCD].columns\n\t\n\t\tallData = []\n\t\n\t\tfor index, item in enumerate(data):\n\t\t\tallData.append(item)\n\t\t\tif maxRows!=0 and index>=maxRows-1: break\n\t\n\t\trows = len(allData)\n\t\tsys.stdout.write(\"\\rRead %d lines with the following columns, %s\\n\"%(rows, str(columns.names)))\n\t\tsys.stdout.flush()\n\t\n\t\t# Count the number of apertures in this data (using this method, the max is 9!)\n\t\tmaxApertureIndex = 0\n\t\tfor column in columns.names:\n\t\t\ttry:\n\t\t\t\tapertureIndex = int(column[-1])\n\t\t\texcept ValueError:\n\t\t\t\tapertureIndex = 0\n\t\t\tif apertureIndex > maxApertureIndex:\n\t\t\t\tmaxApertureIndex = apertureIndex\n\t\tprint \"This data file has %d apertures.\"%(maxApertureIndex)\n\t\n\t\tMJDIndex = columns.names.index('MJD')\n\t\tfor aperture in range(1, maxApertureIndex+1):\n\t\t\tprint \"Loading data for aperture #\", aperture\n\t\t\n\t\t\tphotometry = {}\n\t\t\tphotometry['MJD'] = \t\tdata.field('MJD')\n\t\t\tphotometry['exposure'] = \tdata.field('Expose')\n\t\t\tphotometry['FWHM'] = \t\tdata.field('FWHM')\n\t\t\tphotometry['beta'] = \t\tdata.field('beta')\n\t\t\tphotometry['x'] = \t\t\tdata.field('X_' + str(aperture))\n\t\t\tphotometry['y'] = \t\t\tdata.field('Y_' + str(aperture))\n\t\t\tphotometry['counts'] = \t\tdata.field('Counts_' + str(aperture))\n\t\t\tphotometry['sigma'] = \t\tdata.field('Sigma_' + str(aperture))\n\t\t\tphotometry['sky'] = \t\tdata.field('Sky_' + str(aperture))\n\t\t\tphotometry['sigma'] = \t\tdata.field('Sigma_' + str(aperture))\n\t\t\tphotometry['error'] = \t\tdata.field('Eflag_' + str(aperture))\n\t\t\n\t\t\tid = 
slots.getNextSlotID()\n\t\t\tprint \"new ID:\", id\n\t\t\tslot = photometryClasses.slotObject(id)\n\t\t\tslot.channels = ['ULTRASPEC']\n\t\t\tslot.target = targetName\n\t\t\tslot.filter = filterName\n\t\t\tslot.aperture = aperture\n\t\t\tslot.headers = headerBlock\n\t\t\tslot.runName = runName\n\t\t\tslot.setPhotometry(photometry)\n\t\t\tslot.setTimeColumn('MJD')\n\t\t\tslot.setYColumn('counts')\n\t\t\tslot.setYError('sigma')\n\t\t\tslot.CCD = CCD\n\t\t\tnumSlots = slots.addSlot(slot)\n\t\t\t# print \"Added the data to a new slot. Total number of slots is now: %d\"%(numSlots)\n\t\t\tprint slot\n\t\n\tinputFile.close()\n\t\t\n\treturn", "def read_parse_file(params):\n\tparam_names = []\n\tparam_options = []\n\tif not os.path.isfile(params.parse_file):\n\t\tprint(\"parse file does not exist! ({})\".format(params.parse_file))\n\t\tsys.exit(NO_PARSE)\n\twith open(params.parse_file, 'r') as pf:\n\t\t# first line should be iteration regex\n\t\tsetattr(params, 'iteration_regex', re.compile(pf.readline().strip()))\n\t\tfor line in pf:\n\t\t\tparam_desc = line.split(';')\n\t\t\tparam_names.append(param_desc[0])\n\t\t\tparam_options.append(param_desc[1])\n\n\treturn param_names,param_options", "def readtxt(obslog):\n\n logger = log.getLogger('obslog.readtxt')\n\n if not os.path.exists(obslog):\n logger.error('Cannot access %s', obslog)\n raise SystemExit\n\n logger.info('Reading %s', obslog)\n\n with open(obslog) as f: # Since we will have to go through the data twice, read the whole file at once.\n data = f.readlines()\n\n header = ['Observation ID', 'Data Labels', 'File Numbers', 'Dataset UT', 'Target Name', 'Filters', 'Slit',\n 'Grating/Wavelength', 'Camera/Prism', 'ExpTime/LNR/Coadds', 'ACQ']\n\n pattern = dict() # Enforce formatting rules to avoid parsing comments as data:\n pattern['Observation ID'] = re.compile(r'^G[NS]-[0-9]{4}[AB]-([CQ]|DD|FT|LP|SV)-[0-9]{0,3}-[0-9]+$')\n pattern['Data Labels'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['File Numbers'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['Dataset UT'] = re.compile(r'^[0-9]{2}:[0-9]{2}:[0-9]{2}$') # 09:58:15\n pattern['Target Name'] = re.compile(r'[a-zA-Z0-9_-]+') # Match any string\n pattern['Filters'] = re.compile(r'[A-Z0-9\\-]+') # H, XD, H2, X, J, H\n pattern['Slit'] = re.compile(r'[a-zA-Z0-9]+') # 0.675, ACQ, LgPin\n pattern['Grating/Wavelength'] = re.compile(r'[0-9]{2,3}/[0-9]\\.[0-9]{2}') # 32/1.65, 111/1.68\n pattern['Camera/Prism'] = re.compile(r'[A-Z]{2}/[A-Z]{3}') # LB/MIR, SB/SXD\n pattern['ExpTime/LNR/Coadds'] = re.compile(r'[0-9]+\\.[0-9]/[0-9]+/[0-9]+') # 0.2/1/25, 300.0/32/1\n pattern['ACQ'] = re.compile(r'^Y*$') # Y or ''\n\n indx = {}\n for line in data:\n if 'Electronic Observing Log' in line:\n date = line.split()[-1][7:]\n logger.debug('Log date: %s', date)\n if line[0:14] == 'Observation ID': # This defines the start of the header row\n for h in header:\n indx[h] = line.find(h) # Find where each column starts\n break # No need to go farther\n\n width = {} # Find the width of each row\n for i in range(len(header) - 1): # This requires that 'header' be an ordered array (not a dictionary)\n width[header[i]] = indx[header[i + 1]] - indx[header[i]]\n width[header[i+1]] = 1 # The ACQ field is either 'Y' or blank\n\n val = {}\n match = {}\n info = {}\n for line in data:\n logger.debug('\\n%s', line)\n files = []\n for h in header:\n val[h] = line[indx[h]: indx[h] + width[h]].strip()\n match[h] = re.match(pattern[h], val[h])\n logger.debug('%s: \"%s\" %s' % (h, val[h], 
match[h]))\n\n # Maybe throw a warning if only match 1 fails; indicating a likely bad pattern specification?\n\n if None in match.values():\n logger.debug('Failed to match all patterns -> This is a comment')\n continue\n\n if '-' in val['File Numbers']:\n start, stop = val['File Numbers'].split('-')\n for i in range(int(start), int(stop)+1):\n files.append(i)\n else:\n files.append(int(val['File Numbers']))\n\n for filenum in files:\n f = 'N%sS%04d.fits' % (date, filenum)\n logger.debug('File: %s', f)\n info[f] = {}\n for h in [header[0]] + header[3:]: # Skip 'Data Labels' and \"File Numbers'\n info[f][h] = val[h]\n\n logger.debug('info: %s', info)\n return info", "def load_info():\n data = np.loadtxt(\"u_sol_meta.txt\", dtype=int)\n return data", "def readInput(in_file_name):\n in_file = open(in_file_name, 'r')\n positions = []\n samples = []\n M = []; P = [];\n MC = []; PC = [];\n while True:\n line = in_file.readline()\n if not line: break\n if line[0] == '#': continue #skip comment\n line = line.rstrip('\\n').split('\\t')\n \n #genomic positions and allele support in plasma samples\n positions.append(int(line[0]))\n samples.append(tuple(map(int, line[1:5])))\n \n #maternal and paternal alleles\n M.append(tuple(line[5:7]))\n MC.append(tuple(map(float, line[7:9])))\n \n P.append(tuple(line[9:11]))\n PC.append(tuple(map(float, line[11:13]))) \n \n in_file.close()\n return positions, samples, M, P, MC, PC", "def parse_ben_sb(parent_directory, code):\n s_b = np.loadtxt(parent_directory+code+'res_ave.out')\n return s_b", "def t9_loadFile(self):\n print \"subspectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"SubSpectrumData/\" + self.filenameparser(filename)\n orgin_file = self.filenameparser(filename)\n self.t9_filename = filename\n self.t9_orginfilename = orgin_file", "def readfile(file, sub_im, cr):\n\n root, ext = os.path.splitext(file)\n\n if ext == '.tif':\n print('Reading tiff image:', file)\n par = readpar(root + '.mli.par')\n data = readtiff(file, sub_im, cr)\n\n else: # must be GAMMA flat binary float format\n print('Reading flat binary image', file)\n par = readpar(root + ext + '.par')\n data = readmli(file, par, sub_im, cr)\n\n # extract relevant metadata\n rho_r = float(par['range_pixel_spacing'].split()[0])\n rho_a = float(par['azimuth_pixel_spacing'].split()[0])\n theta = float(par['incidence_angle'].split()[0])\n\n return data, rho_r, rho_a, theta", "def read_calib_file(calib_path):\n data = {}\n with open(calib_path, 'r') as f:\n for line in f.readlines():\n if not line or line == \"\\n\":\n continue\n key, value = line.split(':', 1)\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n return data", "def read_calib_file(calib_path):\n data = {}\n with open(calib_path, 'r') as f:\n for line in f.readlines():\n if not line or line == \"\\n\":\n continue\n key, value = line.split(':', 1)\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n return data", "def read_parameters():\n hdulist1 = pf.open(source+'/kids_data/KiDS_DR3.1_G9_ugri_shear.fits')\n param1 = hdulist1[1].data['e1'][:sample]\n param2 = hdulist1[1].data['e2'][:sample]\n weights = hdulist1[1].data['weight'][:sample]\n return param1, param2, weights", "def _read_calibration_data(self):\n #Declare global variables.\n global calDig_T1\n global calDig_T2\n global calDig_T3\n global calDig_P1\n global calDig_P2\n global calDig_P3\n global calDig_P4\n global calDig_P5\n global calDig_P6\n global calDig_P7\n global 
calDig_P8\n global calDig_P9\n global calDig_H1\n global calDig_H2\n global calDig_H3\n global calDig_H4\n global calDig_H5\n global calDig_H6\n\n #Temperature calibration\n calDig_T1 = self._read_2bytes_as_ushort_lsbfirst(self.BME280_DIG_T1)\n calDig_T2 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_T2)\n calDig_T3 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_T3)\n\n #Pressure calibration\n calDig_P1 = self._read_2bytes_as_ushort_lsbfirst(self.BME280_DIG_P1)\n calDig_P2 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P2)\n calDig_P3 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P3)\n calDig_P4 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P4)\n calDig_P5 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P5)\n calDig_P6 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P6)\n calDig_P7 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P7)\n calDig_P8 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P8)\n calDig_P9 = self._read_2bytes_as_short_lsbfirst(self.BME280_DIG_P9)\n\n #Humidity calibration\n calDig_H1 = self._read_register_1sbyte(self.BME280_DIG_H1)\n calDig_H2 = self._read_2bytes_as_ushort_lsbfirst(self.BME280_DIG_H2)\n calDig_H3 = self._read_register_1sbyte(self.BME280_DIG_H3)\n calDig_H4 = (self._read_register_1sbyte(self.BME280_DIG_H4) << 4) | (self._read_register_1sbyte(self.BME280_DIG_H4 + 1) & 0xF)\n calDig_H5 = self._read_register_1sbyte((self.BME280_DIG_H5 + 1) << 4) | (self._read_register_1sbyte(self.BME280_DIG_H5) >> 4)\n calDig_H6 = self._read_register_1sbyte(self.BME280_DIG_H6)", "def loadSS2500(filename=None):\n\n import numpy as np\n from StringIO import StringIO\n import Tkinter\n from tkFileDialog import askopenfilename\n from matplotlib.pyplot import figure,subplot,plot,xlabel,ylabel,title,legend\n\n if (filename):\n print \"Opening %s\\n\" %(filename)\n else:\n root = Tkinter.Tk()\n root.withdraw()\n filename = askopenfilename(parent=root, title='Open File',\n filetypes=[('csv files','*.csv'),\n ('txt files', '*.txt')])\n root.destroy()\n root.mainloop()\n\n if filename is not None:\n f = open(filename)\n \n names = f.readline() #variable names\n names = names.strip('\\r\\n')\n names = names.split(\"\\t\")\n f.close()\n\n print('removing \\'' + names.pop(0) + '\\' from names list\\n')\n print('removing \\'' + names.pop(0) + '\\' from names list\\n')\n\n cols=np.arange(2, len(names))\n data = np.genfromtxt(filename, delimiter='\\t', unpack=True,\n skip_header=1, usecols=cols)\n timestrings = np.genfromtxt(filename, delimiter='\\t',\n unpack=True, skip_header=1,\n usecols=[1], dtype=str)\n time = np.zeros(len(timestrings))\n\n for i in range(len(timestrings)):\n s = StringIO(timestrings[i])\n timedata = np.genfromtxt(s, dtype=[int, int, float], delimiter=\":\")\n (hours, minutes, seconds) = timedata.item()\n time[i] = 3600. * hours + 60. 
* minutes + seconds\n\n PBT = names.index('Part Bed Temp.')\n PBDC = names.index('Part Bed Duty Cycle')\n\n figure()\n subplot(211)\n plot(time, data[PBT], label=names[PBT])\n ylabel(names[PBT] + r'$ \\left(^{\\circ}C\\right)$')\n\n subplot(212)\n plot(time, data[PBDC], label=names[PBDC])\n xlabel('Time(s)')\n ylabel(names[PBDC] + r'$ \\left( \\% \\right) $')\n\n return data, time, names", "def _setup(self):\n numerator = np.arange(1, MAX_NUMERATOR, dtype=float)\n denominator = np.arange(1, MAX_DENOMINATOR, dtype=float)\n outer = np.outer(numerator, 1/denominator)\n self.ratios = np.unique(outer[outer!=1])\n\n self.known_periods, self.known_dms, self.known_ras, self.known_decls = \\\n np.loadtxt(KNOWNPSR_FILENM, usecols=(0,1,2,3), unpack=True)", "def read_data(self):\n data = np.genfromtxt(self.__file) # Planck SED\n self.__nu = 10.0**data[:,0]\n self.__nuF = 10.0**data[:,2]\n self.__err = 10.0**data[:,3]\n #self.__W = 10.0**data[:,4]\n self.__yerr = [ self.__nuF - self.__nuF / self.__err, \\\n self.__nuF * self.__err - self.__nuF ]\n self.__maxY = max( self.__nuF )\n self.__minY = min( self.__nuF )", "def get_data():\n return np.genfromtxt(FILENAME, delimiter=',', skip_header=1)", "def _get_parameter_data(\n self, vis_hdu, read_source, run_check_acceptability, background_lsts=True\n ):\n # astropy.io fits reader scales date according to relevant PZER0 (?)\n # uvfits standard is to have 2 DATE parameters, both floats:\n # DATE (full day) and _DATE (fractional day)\n # cotter uvfits files have one DATE that is a double\n # using data.par('date') is general -- it will add them together if there are 2\n self.time_array = vis_hdu.data.par(\"date\")\n\n self.Ntimes = len(np.unique(self.time_array))\n\n # check if lst array is saved. It's not a standard metadata item in uvfits,\n # but if the file was written with pyuvdata it may be present\n # (depending on pyuvdata version)\n proc = None\n if \"LST\" in vis_hdu.data.parnames:\n # angles in uvfits files are stored in degrees, so convert to radians\n self.lst_array = np.deg2rad(vis_hdu.data.par(\"lst\"))\n if run_check_acceptability:\n (\n latitude,\n longitude,\n altitude,\n ) = self.telescope_location_lat_lon_alt_degrees\n uvutils.check_lsts_against_times(\n jd_array=self.time_array,\n lst_array=self.lst_array,\n latitude=latitude,\n longitude=longitude,\n altitude=altitude,\n lst_tols=self._lst_array.tols,\n frame=self._telescope_location.frame,\n )\n\n else:\n proc = self.set_lsts_from_time_array(background=background_lsts)\n\n # if antenna arrays are present, use them. 
otherwise use baseline array\n if \"ANTENNA1\" in vis_hdu.data.parnames and \"ANTENNA2\" in vis_hdu.data.parnames:\n # Note: we no longer subtract 1 from the antenna arrays\n # The antanna arrays are not indices but rather are numbers\n # that correspond to particular antennas\n self.ant_1_array = np.int32(vis_hdu.data.par(\"ANTENNA1\"))\n self.ant_2_array = np.int32(vis_hdu.data.par(\"ANTENNA2\"))\n # for instruments with a single subarray, the subarray should be set to 1s\n subarray = np.int32(vis_hdu.data.par(\"SUBARRAY\"))\n # error on files with multiple subarrays\n if len(set(subarray)) > 1:\n raise ValueError(\n \"This file appears to have multiple subarray \"\n \"values; only files with one subarray are \"\n \"supported.\"\n )\n else:\n # cannot set this to be the baseline array because it uses the\n # 256 convention, not our 2048 convention\n bl_input_array = np.int64(vis_hdu.data.par(\"BASELINE\"))\n\n # get antenna arrays based on uvfits baseline array\n self.ant_1_array, self.ant_2_array = self.baseline_to_antnums(\n bl_input_array\n )\n\n if read_source:\n source = vis_hdu.data.par(\"SOURCE\")\n self.phase_center_id_array = source.astype(int)\n\n # get self.baseline_array using our convention\n self.baseline_array = self.antnums_to_baseline(\n self.ant_1_array, self.ant_2_array\n )\n self.Nbls = len(np.unique(self.baseline_array))\n\n # initialize internal variables based on the antenna lists\n self.Nants_data = int(np.union1d(self.ant_1_array, self.ant_2_array).size)\n\n # check for suffixes in the baseline coordinate names indicating the\n # baseline coordinate system\n if (\n \"UU\" in vis_hdu.data.parnames\n and \"VV\" in vis_hdu.data.parnames\n and \"WW\" in vis_hdu.data.parnames\n ):\n uvw_names = [\"UU\", \"VV\", \"WW\"]\n elif (\n \"UU---SIN\" in vis_hdu.data.parnames\n and \"VV---SIN\" in vis_hdu.data.parnames\n and \"WW---SIN\" in vis_hdu.data.parnames\n ):\n uvw_names = [\"UU---SIN\", \"VV---SIN\", \"WW---SIN\"]\n elif (\n \"UU---NCP\" in vis_hdu.data.parnames\n and \"VV---NCP\" in vis_hdu.data.parnames\n and \"WW---NCP\" in vis_hdu.data.parnames\n ):\n uvw_names = [\"UU---NCP\", \"VV---NCP\", \"WW---NCP\"]\n warnings.warn(\n \"The baseline coordinates (uvws) in this file are specified in the \"\n \"---NCP coordinate system, which is does not agree with our baseline \"\n \"coordinate conventions. Rotating the uvws to match our convention \"\n \"(Note that this rotation has not been widely tested).\"\n )\n else:\n raise ValueError(\n \"There is no consistent set of baseline coordinates in this file. 
\"\n \"The UU, VV and WW coordinate must have no suffix or the '---SIN' or \"\n \"'---NCP' suffix and the suffixes must match on all three baseline \"\n \"coordinate parameters.\"\n )\n\n # read baseline vectors in units of seconds, return in meters\n # FITS uvw direction convention is opposite ours and Miriad's.\n # So conjugate the visibilities and flip the uvws:\n # uvfits files often have uvws in single precision rather than double precision.\n # setting the dtype below enforces double precision\n self.uvw_array = (-1) * (\n np.array(\n np.stack(\n (\n vis_hdu.data.par(uvw_names[0]),\n vis_hdu.data.par(uvw_names[1]),\n vis_hdu.data.par(uvw_names[2]),\n )\n ),\n dtype=self._uvw_array.expected_type,\n )\n * const.c.to(\"m/s\").value\n ).T\n\n if \"INTTIM\" in vis_hdu.data.parnames:\n self.integration_time = np.asarray(\n vis_hdu.data.par(\"INTTIM\"), dtype=np.float64\n )\n else:\n if self.Ntimes > 1:\n # assume that all integration times in the file are the same\n int_time = self._calc_single_integration_time()\n self.integration_time = (\n np.ones_like(self.time_array, dtype=np.float64) * int_time\n )\n else:\n warnings.warn(\n \"The integration time is not specified and only one time is \"\n \"present so it cannot be calculated from the difference between \"\n \"integration times. Setting to None which will cause the check to \"\n \"error. Set `run_check` to False to read in the file without \"\n \"checking. Then set the integration_time (to an array of length \"\n \"Nblts) directly on the object to allow futher processing.\"\n )\n\n if proc is not None:\n proc.join()", "def readMagneticsObservations(self, obs_file):\n\n fid = open(self.basePath + obs_file,'r')\n\n # First line has the inclination,declination and amplitude of B0\n line = fid.readline()\n B = np.array(line.split(),dtype=float)\n\n # Second line has the magnetization orientation and a flag\n line = fid.readline()\n M = np.array(line.split(),dtype=float)\n\n # Third line has the number of rows\n line = fid.readline()\n ndat = np.array(line.split(),dtype=int)\n\n # Pre-allocate space for obsx, obsy, obsz, data, uncert\n line = fid.readline()\n temp = np.array(line.split(),dtype=float)\n\n d = np.zeros(ndat, dtype=float)\n wd = np.zeros(ndat, dtype=float)\n locXYZ = np.zeros( (ndat,3), dtype=float)\n\n for ii in range(ndat):\n\n temp = np.array(line.split(),dtype=float)\n locXYZ[ii,:] = temp[:3]\n\n if len(temp) > 3:\n d[ii] = temp[3]\n\n if len(temp)==5:\n wd[ii] = temp[4]\n\n line = fid.readline()\n\n rxLoc = BaseMag.RxObs(locXYZ)\n srcField = BaseMag.SrcField([rxLoc],param=(B[2],B[0],B[1]))\n survey = BaseMag.LinearSurvey(srcField)\n survey.dobs = d\n survey.std = wd\n return survey", "def subsample():\n\n nwav = 872\n nrow = 1600\n ncol = 1560\n\n fpath = os.path.join(HYSS_ENVIRON['HYSS_WRITE'],'raw_binned/nrow1600')\n fnames = ['full_frame_20ms_faster_VNIR_1600.raw',\n 'full_frame_20ms_faster_VNIR_1600_flat.raw']\n\n for fname in fnames:\n print(\"SUBSAMPLE: reading data from {0}\".format(fpath))\n print(\"SUBSAMPLE: {0}\".format(fname))\n data = np.fromfile(os.path.join(fpath,fname)).reshape(nwav,nrow,ncol)\n\n for fac in [2,4,8]:\n trow = '{0:04}'.format(1600/fac)\n opath = os.path.join(HYSS_ENVIRON['HYSS_WRITE'],'raw_subsample',\n 'nrow'+trow)\n oname = fname.replace('1600',trow)\n\n print(\"SUBSAMPLE: writing subsampled data to {0}\".format(opath))\n print(\"SUBSAMPLE: {0}\".format(oname))\n data[:,::fac,::fac].tofile(open(os.path.join(opath,oname),'wb'))\n\n return", "def read_ini(ini_file): \n project = ''\n 
bd_analysis_time = bd_interval_time = '' \n bd_ripper_analysis_time = bd_ripper_interval_time = '' \n dvd_analysis_time = dvd_interval_time = '' \n file_analysis_time = file_interval_time = '' \n bd_path = dvd_path = file_path = ''\n bd_path_mac = dvd_path_mac = file_path_mac = ''\n params_dict = {}\n \n if os.path.exists(ini_file): \n try:\n config = ConfigParser.ConfigParser()\n config.readfp(open(ini_file))\n except Exception, e:\n initlog('failed to read ini file; %s' % str(e)) \n else: \n try:\n project = (config.get('Project', 'project')) \n bd_analysis_time = int(config.get('BD/3Dcopy','analysis time')) \n bd_interval_time = int(config.get('BD/3Dcopy','interval time')) \n bd_ripper_analysis_time = int(config.get('BD/3Dcopy','ripper analysis time')) \n bd_ripper_interval_time = int(config.get('BD/3Dcopy','ripper interval time')) \t\t\t\t\n dvd_analysis_time = int(config.get('DVD/DVDcopy','analysis time')) \n dvd_interval_time = int(config.get('DVD/DVDcopy','interval time'))\n file_analysis_time = int(config.get('FILE','analysis time')) \n file_interval_time = int(config.get('FILE','interval time'))\n bd_path = config.get('BD/3Dcopy','bd_path')\n dvd_path = config.get('DVD/DVDcopy', 'dvd_path')\n file_path = config.get('FILE','file_path')\n bd_path_mac = config.get('BD/3Dcopy','bd_path_mac')\n dvd_path_mac = config.get('DVD/DVDcopy', 'dvd_path_mac')\n file_path_mac = config.get('FILE','file_path_mac')\n except Exception, e:\n initlog('read ini file error; %s' % str(e))\n else:\n initlog('dvdfab_auto_tool.ini file does not exist')\n params_dict[\"project\"] = project\n params_dict[\"bd_analysis_time\"] = bd_analysis_time\n params_dict[\"bd_interval_time\"] = bd_interval_time\n params_dict[\"bd_ripper_analysis_time\"] = bd_ripper_analysis_time\n params_dict[\"bd_ripper_interval_time\"] = bd_ripper_interval_time\n params_dict[\"dvd_analysis_time\"] = dvd_analysis_time\n params_dict[\"dvd_interval_time\"] = dvd_interval_time\n params_dict[\"file_analysis_time\"] = file_analysis_time\n params_dict[\"file_interval_time\"] = file_interval_time\n params_dict[\"bd_path\"] = bd_path\n params_dict[\"dvd_path\"] = dvd_path\n params_dict[\"file_path\"] = file_path\n params_dict[\"bd_path_mac\"] = bd_path_mac\n params_dict[\"dvd_path_mac\"] = dvd_path_mac\n params_dict[\"file_path_mac\"] = file_path_mac\n return params_dict\n return project, bd_analysis_time, bd_interval_time, bd_ripper_analysis_time, bd_ripper_interval_time, dvd_analysis_time, dvd_interval_time, file_analysis_time, file_interval_time, bd_path, dvd_path, file_path,bd_path_mac,dvd_path_mac,file_path_mac" ]
[ "0.58298117", "0.5741129", "0.5640352", "0.54980344", "0.5496068", "0.5432148", "0.5394989", "0.53380096", "0.53087157", "0.5273573", "0.52647877", "0.5246655", "0.5224779", "0.5212376", "0.52076614", "0.5198927", "0.5182285", "0.5177914", "0.5172997", "0.5172781", "0.516248", "0.51255375", "0.51171046", "0.5105941", "0.5099218", "0.5091882", "0.50832456", "0.5080764", "0.50803757", "0.50546193", "0.50527054", "0.5044653", "0.5040873", "0.5032236", "0.50246316", "0.50191927", "0.50174356", "0.5009763", "0.5006495", "0.50053096", "0.49876255", "0.49859262", "0.4982619", "0.49814436", "0.49780142", "0.49719384", "0.49714708", "0.49642652", "0.4958006", "0.4958006", "0.49578023", "0.49524996", "0.4950309", "0.49430704", "0.49374324", "0.49348402", "0.4931605", "0.4930984", "0.49265754", "0.4912353", "0.49096575", "0.49028528", "0.49028528", "0.4902084", "0.48928297", "0.4890679", "0.48889562", "0.4888875", "0.48802516", "0.48742774", "0.4870078", "0.4859095", "0.48590168", "0.48587915", "0.48563844", "0.48502022", "0.48457077", "0.48438755", "0.48429817", "0.484295", "0.48414785", "0.48413098", "0.48314592", "0.48306695", "0.4814016", "0.48125717", "0.4809282", "0.48092803", "0.48063955", "0.48063955", "0.48059025", "0.48053414", "0.4803543", "0.48031273", "0.48003203", "0.47987923", "0.47943285", "0.47912627", "0.47871318", "0.4782451" ]
0.7251039
0
Old string code that takes any 2D list and maps it to a 1D list by adding each subsequent row to the end of the first row
def transformation_flatten(twoDlistinput):\n    oneDlistoutput = []\n    for i in range(len(twoDlistinput)):\n        for j in range(len(twoDlistinput[i])):\n            oneDlistoutput.append(twoDlistinput[i][j])\n    return(oneDlistoutput)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rebuild_row(lst, is_collocation):\n split_list = lst[0].split(\"\\t\")\n if is_collocation:\n return [split_list[0] + \" \" + split_list[1], \"1\"]\n return [split_list[0] + \" \" + split_list[1], \"0\"]", "def transpose(str_list):\n # takes list of strings of dimensions n x m\n if (n := len(str_list)) == 0:\n return []\n if (m := len(str_list[0])) == 1 and n == m:\n return str_list\n transposed_board = []\n for j in range(m):\n transposed_board.append('')\n for i in range(n):\n transposed_board[j] += str_list[i][j]\n return transposed_board", "def parse2DList(self,string):\r\n string = string.replace(\"[\",\"\")\r\n string = string.replace(\"],\",\"*\")\r\n string = string.replace(\"]\", \"\")\r\n string = string.split(\"*\")\r\n for i in xrange(len(string)):\r\n string[i] = string[i].split(\",\")\r\n for i in xrange(len(string)):\r\n for j in xrange(len(string[i])):\r\n string[i][j] = int(string[i][j])\r\n string[i] = list(string[i])\r\n return string", "def expand_dim_str(lstring):\n a = np.array(lstring)\n dim = len(a.shape)\n new_2d=[]\n if dim == 1:\n ### ele is string\n for ele in a:\n new_list=ele.split()\n new_2d.append(new_list)\n return new_2d", "def row_to_col(lst_lst: List[List[Any]]) -> List[List[Any]]:\n head = (lambda x: x[0])\n tail = (lambda x: x[1:])\n if not lst_lst or not head(lst_lst): # c.f. the remark about recursion termination\n return []\n else:\n return [list(map(head, lst_lst))] + (row_to_col(list(map(tail, lst_lst))))", "def expand_row(\n row: Sequence[Union[str, Sequence[Union[str, Sequence[str]]]]]\n) -> List[List[str]]:\n elems_as_lists = []\n for elem in row:\n if isinstance(elem, list):\n elems_as_lists.append(elem)\n else:\n elems_as_lists.append([elem])\n aligned = [list(i) for i in zip_longest(*elems_as_lists, fillvalue=\"\")]\n return aligned", "def _advanceList(self, list):\n\t\tnewList = [\" \"] * len(list)\n\t\tfor i in range(len(list) - 1):\n\t\t\tnewList[i + 1] = list[i]\n\t\treturn newList", "def _add_from_list(self, row) :\n\n data = [0]\n data.extend(row[:len(self.cols)-1])\n cols = self.cols[:len(data)]\n self._insert_internal(cols, data)", "def lines_to_matrix(lines):\n\n for index, line in enumerate(lines):\n lines[index] = [char for char in line]\n\n return lines", "def string_list_to_actg(b: list[list[str]]) -> list[list[str]]:\r\n s_out = b\r\n for i in range(len(s_out)):\r\n for j in range(len(s_out[i])):\r\n s_out[i][j] = single_string_to_actg(b[i][j])\r\n return s_out", "def parse1DList(self,string):\r\n string = string.replace(\"[\",\"\")\r\n string = string.replace(\"]\",\"\")\r\n string = string.split(\",\")\r\n for i in xrange(len(string)):\r\n string[i] = float(string[i])\r\n string = list(string)\r\n return string", "def matrixify(string_grid, separator='\\n'):\n return string_grid.split(separator)", "def ijoin_lists(l):\n if l:\n try:\n if not all(ymap(isinstance, l, list)):\n from tek.errors import MooException\n raise MooException('Some elements aren\\'t lists!')\n for i in cumsum([0] + list(map(len, l[:-1]))):\n l[i:i+1] = l[i]\n except Exception as e:\n logger.debug('ijoin_lists failed with: ' + str(e))\n return l", "def flatten_2D_list(list_2d):\n return [item for sub in list_2d for item in sub]", "def getStringArray2D(self) -> typing.List[typing.List[str]]:\n ...", "def convertToTwoDList(l, n):\n\treturn [l[i:i+n] for i in range(0, len(l), n)]", "def _combine_omnipage_cell_list(table, inds, row_flag):\n if row_flag:\n row_or_col_list = [table[i, :] for i in inds]\n else:\n row_or_col_list = [table[:, i] for i in 
inds]\n return [' '.join(_unique_sorted([str(k) for k in j])).strip()\n for j in zip(*row_or_col_list)]", "def __join_expanded(expanded: list[typing.Union[str, list[str]]]) -> list[str]:\n list_values = [(i, val) for i, val in enumerate(expanded) if isinstance(val, list)]\n\n if len(list_values) == 0:\n return [\"\".join(expanded)]\n\n initial_len = len(list_values[0][1]) if list_values else None\n\n if not all(len(i) == initial_len for _, i in list_values[1::]):\n raise ValueError(\"not all non-expanded list are of the same size\")\n\n pairs = zip(*[[(i, j) for j in val] for i, val in list_values])\n\n result = list()\n for pair in pairs:\n cc = expanded.copy()\n\n for i, v in pair:\n del(cc[i])\n cc.insert(i, v)\n\n result.append(\"\".join(cc))\n\n return result", "def align_lis_lis(lis_lis):\n lis_lis = [[str(l) for l in lis]\n for lis in lis_lis] # trans every element to str\n #make all inner lists of the same length\n inner_lis_max_len = max(len(lis) for lis in lis_lis)\n lis_lis = [lis + (inner_lis_max_len - len(lis)) * [''] for lis in lis_lis]\n #trans list, so that the elements of the same column are in one list\n lis_lis = [[lis[i] for lis in lis_lis] for i in range(inner_lis_max_len)]\n #make element in the same list have the same length\n aligned = []\n for lis in lis_lis:\n width = max([len(l) for l in lis])\n lis = [l + (width - len(l)) * ' ' for l in lis]\n aligned.append(lis)\n #trans list_list to the original list_list\n inner_lis_max_len = max(len(lis) for lis in lis_lis)\n lis_lis = [[lis[i] for lis in aligned] for i in range(inner_lis_max_len)]\n return lis_lis", "def string_list_to_cells(lst):\n cells = np.ndarray(len(lst), dtype = 'object')\n for i in range(len(lst)):\n cells[i] = lst[i]\n return cells", "def stringify_list(rows, separator='\\t'):\n return (separator.join(toolz.map(text_type, row)) for row in rows)", "def matrixstr(inputstr, converter=proper):\n ys = basicformat(inputstr).split(\"\\n\")\n for x in xrange(0,len(ys)):\n ys[x] = basicformat(ys[x])[1:-1].split(\",\")\n for z in xrange(0, len(ys[x])):\n ys[x][z] = converter(ys[x][z])\n return matrixlist(ys, converter)", "def matrix_to_string(main_list):\n output = \"\"\n for sub_list in main_list:\n for element in sub_list:\n output += element\n output += \"\\n\"\n return output.rstrip(\"\\n\")", "def add_row(matrix):\n\tl = len(matrix[0])\n\ttemp = matrix[:]\n\ttemp += [[0]*l]\n\treturn temp", "def well_list_from_region (row_strs, col_strs):\n\n well_pos_list = []\n for i in row_strs:\n for j in col_strs:\n well_pos_list += [ i + j ]#have to have extra list brackets to avoid python interpreting a string 'FFF' as\n #a list ['F', 'F', 'F'] and adding 3 items instead of 'FFF'\n\n return well_pos_list", "def convert(self, s, numRows):\n \n if numRows == 1:\n return s\n \n matrix = []\n for i in range(numRows):\n\n # Change from storing matrices of letter to strings\n # matrix.append([])\n matrix.append('')\n\n r = 0\n \n for i in range(len(s)):\n # print(\"r:{}\".format(r))\n # print(\"i:{}\".format(i))\n\n # Change from storing matrices of letter to strings\n # matrix[r].append(s[i])\n matrix[r]+=s[i]\n\n if i % ((numRows-1)*2) < (numRows-1): \n r += 1\n else:\n r -= 1\n \n # print(matrix)\n\n # Now don't have to join individual strings first\n # for j in range(len(matrix)):\n # matrix[j]=''.join(matrix[j])\n \n return ''.join(matrix)", "def acgt_to_string(s: list[list[str]]) -> list[list[str]]:\r\n s_out = [[\"\"] for i in range(len(s))]\r\n for i in range(len(s) - 1):\r\n h = \"\"\r\n for j in 
range(len(s[i])):\r\n if s[i][j] == 0:\r\n h += \"00\"\r\n if s[i][j] == 1:\r\n h += \"01\"\r\n if s[i][j] == 2:\r\n h += \"10\"\r\n if s[i][j] == 3:\r\n h += \"11\"\r\n s_out[i][0] = h\r\n return s_out", "def createOneRow(width):\r\n row = []\r\n for col in range(width):\r\n row += [0]\r\n return row\r\n #return [0 for x in range(width)]\r", "def flatten_2d(a_2dlist):\n return list(itertools.chain(*a_2dlist))", "def transform_input(data: str) -> Matrix:\n return [\n list(map(int, list(row)))\n for row in data.split('\\n')\n ]", "def build_list(self, l):\n comma = self.art_type([self.string_type(', ')],\n baseline=0,\n breakpoints=[1])\n repr_elems = self.concatenate(l, comma)\n return self.build_container(\n repr_elems, self.left_square_bracket, self.right_square_bracket)", "def convertToRow(initialState):\n size = np.sqrt(len(initialState))\n rowform = ''\n for i in range(int(size)):\n j = i;\n while j < len(initialState):\n rowform += initialState[j]\n j += 3\n return rowform", "def cell2mat2(l, max_len=None) -> nptyp.NDArray[float]:\n if max_len is None:\n max_len = np.amax([len(l1) for l1 in l])\n \n n = len(l)\n m = np.zeros([n, max_len]) + np.nan\n \n for ii in range(n):\n l1 = l[ii]\n if len(l1) > max_len:\n m[ii,:] = l1[:max_len]\n elif len(l1) < max_len:\n m[ii,:len(l1)] = l1\n else:\n m[ii,:] = l1\n\n return m", "def merge_AllLeft(lsts):\r\n new_lsts = []\r\n for row in lsts:\r\n array1 = add_tiles(row)\r\n new_lsts.append(array1)\r\n lsts = new_lsts\r\n\r\n return lsts", "def turnArraySideways(array):\n newList = []\n\n for item in array[0]:\n newList.append([])\n\n x = -1\n for row in array:\n x = x + 1\n y = - 1\n for item in row:\n y = y + 1\n newList[y].append(item)\n\n return newList", "def reconstruct_lnotab(first_line, offsets, lines):\n current_offset, current_line = 0, first_line\n new_lnotab = []\n for offset, line in zip(offsets, lines):\n new_offset = offset - current_offset\n while new_offset > 255:\n new_lnotab.append(255)\n new_lnotab.append(0)\n new_offset -= 255\n new_lnotab.append(new_offset)\n new_line = line - current_line\n while new_line > 255:\n new_lnotab.append(255)\n new_lnotab.append(0)\n new_line -= 255\n new_lnotab.append(new_line)\n current_offset, current_line = offset, line\n return array.array('B', new_lnotab).tostring()", "def add_dummy_location_to_matrix(matrix):\n matrix = [row + [0] for row in matrix]\n last_row = [0 for _ in range(len(matrix) + 1)]\n matrix.append(last_row)\n return matrix", "def train_transpose(string):\r\n \r\n data = []\r\n linedata = []\r\n worddata = []\r\n for letter in string:\r\n if letter == \"\\n\":\r\n linedata.append(worddata)\r\n data.append(linedata)\r\n linedata = []\r\n worddata = []\r\n elif letter == \" \" or letter == \":\":\r\n linedata.append(worddata)\r\n worddata = []\r\n else:\r\n worddata.append(letter)\r\n linedata.append(worddata)\r\n data.append(linedata)\r\n return data", "def compact_simple_list(match):\n # Calculate the initial indent as the length of the first match group\n initial_indent = len(match.group(1))\n\n # Get the lines in the match\n lines = match.group(2).splitlines()\n\n # Set the indent by finding the indent of the first lines\n if len(lines) > 1:\n subsequent_indent = len(lines[1]) - len(lines[1].lstrip())\n else:\n subsequent_indent = 0\n\n # Strip whitespace from the lines\n lines = [l.strip() for l in lines]\n\n # Create and return the string wrapped about 80 chars\n list_string = \"\\n\".join(\n textwrap.wrap(\n \" \".join(lines),\n 80,\n initial_indent=\" \" * 
initial_indent,\n subsequent_indent=\" \" * subsequent_indent,\n )\n ).lstrip()\n\n # Return the string\n return match.group(1) + list_string", "def mapper(list_of_textlines):\n text = [i.lower() for i in list_of_textlines]\n text = [re.subn(\"\\s+|\\n+\", \" \", i)[0] for i in text]\n text = [re.subn(\"[.!@#$%^&*()-_+=,./?\\\"'|\\}{:;]+\", \" \", i)[0] for i in text]\n text = [re.split(\"\\s+\", i) for i in text]\n text = [[i for i in j if i != ''] for j in text]\n text = [i for i in text if len(i) > 0]\n text = [item for sublist in text for item in sublist]\n\n return text", "def prefix_all(value, LL):\n return [[value] + L for L in LL]", "def allstrings2(alphabet, length):\n\n c = []\n for i in range(length):\n c = [[x]+y for x in alphabet for y in c or [[]]]\n\n return c", "def interpose(el, seq):\n return rest(mapcat(lambda x: [el, x], seq))", "def uniform_list_length(labels):\n max_num = max([len(i) for i in labels])\n for label in labels:\n for num in range(1, max_num):\n if len(label) == num:\n label.extend([\" \" for i in range(max_num - num)])\n return labels", "def _string_to_matrix(str_in: str):\n nums = list(str_in)\n n = int(len(nums) ** 0.5)\n return list(map(list, zip(*[map(str, nums)] * n)))", "def wrap_list(list1, fmt = '%16s', delimiter = \",\", maxcols = 8):\n len1 = len(list1)\n string = \"\"\n for i in range(0, len1, maxcols):\n li = list1[i : i + maxcols]\n stri = format_list(li, fmt = fmt, delimiter = delimiter)\n string += stri\n return string", "def createOneRow(width):\n row = []\n for col in range(width):\n row += [0]\n return row", "def createOneRow(width):\n row = []\n for col in range(width):\n row += [0]\n return row", "def transpose(lsts):\r\n\r\n new_lsts = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\r\n for i in range(4):\r\n for j in range(4):\r\n new_lsts[i][j] = lsts[j][i]\r\n lsts = new_lsts\r\n\r\n return lsts", "def array_to_list(llist, source):\r\n while source: #a list is considered True as long as it is not empty\r\n llist.append(source.pop(0))\r\n \r\n return", "def createOneRow(width):\r\n row = []\r\n for col in range(width):\r\n row += [0]\r\n return row", "def strToList(S):\r\n if len(S) == 0: return []\r\n return [S[0]] + strToList(S[1:])", "def collate_lats(elements_list):\n\n source = [e for e, _, _ in elements_list]\n targets = [t for _, t, _ in elements_list]\n utt_ids = [u for _, _, u in elements_list]\n return padding(source, targets), utt_ids", "def rotate1(self, matrix: List[List[int]]) -> None:\n matrixLen = len(matrix)\n\n for i in range(matrixLen):\n for j in range(i, matrixLen):\n print(i, j)\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n for i in range(matrixLen):\n for j in range(matrixLen // 2):\n matrix[i][j], matrix[i][matrixLen - 1 - j] = matrix[i][matrixLen - 1 - j], matrix[i][j]", "def rejoin(l0,l1=None):\r\n while len(l0) != 0:\r\n if l1 is None:\r\n l1 = []\r\n if l0[0] == '<':\r\n if len(l0) >= 3:\r\n if l0[2] == '>':\r\n l1 += [l0.pop(0) + l0.pop(0) + l0.pop(0)]\r\n else:\r\n l1 += [l0.pop(0)]\r\n return l1", "def conver1D(array):\n l = array.shape\n total = np.zeros((0, l[1] * l[2]), dtype=np.float32)\n i = 0\n for i in range(24):\n tempData = array[i]\n array1D = []\n for x in tempData:\n for s in x:\n array1D.append(s)\n total = np.insert(total, i, array1D, axis=0)\n return total", "def zip_longest_strings(arr):\n # Pad lengths with whitespace strings\n max_row_len = max(len(row) for row in arr)\n arr = [row + [rep_str(' ', len(row[0]))] * (max_row_len - len(row))\n for row in arr]\n # 
Transpose\n return map(list, zip(*arr))", "def list_data_collate(batch):\n elem = batch[0]\n data = [i for k in batch for i in k] if isinstance(elem, list) else batch\n return default_collate(data)", "def twistgrid(crossword):\n n = len(crossword)\n twisted = []\n for col in range(n):\n tw_row = [crossword[i][col] for i in range(n)]\n tw_row = ''.join(tw_row)\n twisted.append(tw_row)\n # print(twisted)\n return twisted", "def transpose(string):\r\n \r\n data = []\r\n linedata = []\r\n worddata = []\r\n for letter in string:\r\n if letter == \"\\n\":\r\n linedata.append(worddata)\r\n data.append(linedata)\r\n linedata = []\r\n worddata = []\r\n elif letter == \" \":\r\n linedata.append(worddata)\r\n worddata = []\r\n else:\r\n worddata.append(letter)\r\n linedata.append(worddata)\r\n data.append(linedata)\r\n return data", "def concat_list(lst, batch_flags=None):\n slices = [slice(0)] * len(lst)\n datas = []\n row_flag = 0\n for i, r in enumerate(lst):\n if r is None:\n slices[i] = None\n continue\n j = -1\n if batch_flags is None or batch_flags[i]:\n for j, d in enumerate(r):\n datas.append(d)\n slices[i] = slice(row_flag, row_flag + j + 1)\n else:\n datas.append(r)\n slices[i] = row_flag\n row_flag += j + 1\n return datas, slices", "def toeplitz(x):\n t = []\n for i in range(len(x)):\n row = []\n for j in range(len(x)):\n if i < j:\n row.append(x[j-i])\n elif i == j:\n row.append(x[0])\n else:\n row.append(x[i-j])\n t.append(row)\n return t", "def interleave_binarystr(str_list):\n ret = \"\"\n for i in range(0, len(min(str_list)), 2):\n for j in range(len(str_list)):\n ret += str_list[j][i:(i + 2)]\n return ret", "def map_view(state):\n string_rows = []\n\n for row in state:\n string_row1 = []\n string_row2 = []\n for cell in row:\n if \"grass\" not in cell and \"lapis_block\" not in cell:\n string_row1.append(\"XXX\")\n string_row2.append(\"XXX\")\n else:\n bottom_corners = \"E\" if \"lapis_block\" in cell else \" \"\n string_row1.append((\"A\" if \"Agent_2\" in cell else \" \") + \" \" +\n (\"P\" if \"Pig\" in cell else \" \"))\n string_row2.append(bottom_corners + (\"C\" if \"Agent_1\" in cell else \" \") + bottom_corners)\n string_rows.append(\"\".join(string_row1))\n string_rows.append(\"\".join(string_row2))\n\n return \"\\n\".join(string_rows)", "def ragged_to_regular(array_list):\n join_length = len(array_list)\n # the weird line below is faster than allocating numpy arrays\n dims = list(zip(*[array.shape for array in array_list]))\n max_dims = tuple(max(dim) for dim in dims)\n dtype = array_list[0].dtype\n padded_hypercube = np.zeros((join_length,) + max_dims, dtype=dtype)\n for i in range(join_length):\n multislice = (slice(i, i+1, 1),) + tuple(slice(0, dim[i], 1)\n for dim in dims)\n padded_hypercube[multislice] = array_list[i]\n return padded_hypercube", "def convert_to_string_array(matrix):\n res = []\n for row in matrix:\n res.append(''.join(row))\n return '\\n'.join(res)", "def str_transform_list(L):\n return [str(x) for x in L]", "def map(self, seq):\n result = []\n\n for elt in seq:\n if isinstance(elt, list):\n result.append(self.map(elt))\n else:\n result.append(self(elt))\n\n return result", "def deflatten(flat_li, *original_li):\n if len(original_li) == 1:\n original_li = original_li[0]\n deflatten_li = []\n i = 0\n for el in original_li:\n if isinstance(el, Sequence):\n deflatten_li.append(flat_li[i:i+len(el)])\n i += len(el)\n else:\n deflatten_li.append(flat_li[i])\n i += 1\n return deflatten_li", "def sudoku_region_to_line(A):\r\n n = len(A)\r\n B = [0 for i in 
range(n*n)]\r\n for i in range(n):\r\n for j in range(n):\r\n B[i + j * n] = A[j][i]\r\n return B", "def flatten_list(lst):\n assert isinstance(lst, list), \"you didn't pass a list!\"\n\n if isinstance(lst[0], list):\n if len(lst[0])>1:\n return ['-'.join(i) for i in lst] # then its a kpoints list\n return flatten_list([i[0] for i in lst])\n else:\n return [i for i in lst]", "def push_row(row, left=True):\n\trow = row[:] if left else row[::-1]\n\tnew_row = [item for item in row if item]\n\tfor i in range(len(new_row)-1):\n\t\tif new_row[i] and new_row[i] == new_row[i+1]:\n\t\t\tnew_row[i], new_row[i+1:] = new_row[i]*2, new_row[i+2:]+[\"\"]\n\tnew_row += [\"\"]*(len(row)-len(new_row))\n\treturn new_row if left else new_row[::-1]", "def copy_grid (grid):\r\n c=[]\r\n for i in range(len(grid)):\r\n c.append(grid[i])\r\n return eval(str(c))", "def toString(arr2d):\n return (\"\\n\".join(\"\\t\".join(row) for row in arr2d))", "def make_two_dim_list(number_of_lists, number_of_points_in_list):\r\n z = [[''] * number_of_points_in_list for i in range(number_of_lists)]\r\n return z", "def concat_lists(column):\n arrays = list_array(column)\n return np.concatenate(arrays)", "def listoflistToarray(l):\n max_dim=max([len(c) for c in l])\n all_array=[np.pad(c,(0,max_dim-len(c)),\"constant\",constant_values=(0,0)) for c in l]\n return np.array(all_array)", "def make_flat(list_of_lists: list) -> list:\n return sum([list(item) for item in list_of_lists], [])", "def merge(line):\n #Step1. Putting 0 to the end of the list.\n result = []\n for cell in line:\n if cell != 0:\n result.append(cell)\n for cell in range(line.count(0)):\n result.append(0)\n #Step2. Replaced with a tile of twice the value and a zero tile\n for cell in range(len(result)-1):\n if result[cell] == result[cell+1] and len(result) != 1:\n result[cell] += result[cell]\n result[cell+1] = 0\n #Step3. 
Repeat step1\n final_result = []\n for cell in result:\n if cell != 0:\n final_result.append(cell)\n for cell in range(result.count(0)):\n final_result.append(0)\n return final_result", "def rowmatrixlist(inputlist=None, converter=proper, func=None, fake=False, clean=False):\n if inputlist is None:\n inputlist = []\n if func is None:\n func = rowmatrixlist\n outlist = []\n for item in inputlist:\n if islist(item):\n item = func(item)\n if not clean or not isnull(item):\n outlist.append(item)\n out = matrix(1, len(outlist), converter=converter, fake=fake)\n for x in xrange(0, len(outlist)):\n out.store(0,x, outlist[x])\n return out", "def maplist(f, xs):\n return list(map(f, xs))", "def rearrange_batch(batch):\n return list(zip(*batch))", "def list_to_str( L ):\n if len(L) == 0: return ''\n return L[0] + list_to_str( L[1:] )", "def list_to_str( L ):\n if len(L) == 0: return ''\n return L[0] + list_to_str( L[1:] )", "def parse3DList(self,string):\r\n string = string.replace(\"[\",\"\")\r\n string = string.replace(\"]],\", \"**\")\r\n string = string.replace(\"],\",\"*\")\r\n string = string.replace(\"]\", \"\")\r\n string = string.split(\"**\")\r\n temp = []\r\n for i in string:\r\n temp.append(i.split(\"*\"))\r\n string = copy.deepcopy(temp)\r\n for i in xrange(len(string)):\r\n for j in xrange(len(string[i])):\r\n string[i][j] = string[i][j].split(\",\")\r\n for i in xrange(len(string)):\r\n for j in xrange(len(string[i])):\r\n for k in xrange(len(string[i][j])):\r\n string[i][j][k] = float(string[i][j][k])\r\n string[i][j] = list(string[i][j])\r\n return string", "def format_matrix(x):\n return ' '.join([format_vector(y) for y in x])", "def _collate_fn(batch):\r\n batch = list(zip(*batch))\r\n batch[0] = torch.stack(batch[0])\r\n batch[1] = list(batch[1])\r\n batch[2] = torch.stack(batch[2])\r\n return tuple(batch)", "def get_right(lst: 'list[Cell]', size: int) -> 'list[Line]':\n form = {1: [[1], [0, 2]],\n 2: [[1, 4], [0, 3, 6], [2, 5]],\n 3: [[1, 4, 8], [0, 3, 7, 11], [2, 6, 10], [5, 9]],\n 4: [[1, 4, 8, 13], [0, 3, 7, 12, 17], [2, 6, 11, 16], [5, 10, 15],\n [9, 14]],\n 5: [[1, 4, 8, 13, 19], [0, 3, 7, 12, 18, 24], [2, 6, 11, 17, 23],\n [5, 10, 16, 22], [9, 15, 21], [14, 20]]}\n result = []\n for line in form[size]:\n result.append(Line([lst[n] for n in line]))\n return result", "def bytes2matrix(text):\n return [list(text[i:i+4]) for i in range(0, len(text), 4)]", "def translate_board(board):\n\n return np.array(board).reshape(6, 7).tolist()", "def string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]", "def _fancy_to_raw(sheng):\n raw_sheng = []\n for entry in sheng:\n raw_entry = list(entry[:6])\n raw_entry[0] += 1\n raw_entry[1] += 1\n raw_entry[2] += 1\n raw_sheng.append(raw_entry)\n\n return raw_sheng", "def sample_list_to_str_2dlist(sample_list, oov_lists, idx2word, vocab_size, eos_idx, delimiter_word, unk_idx=None, replace_unk=False, src_str_list=None, separate_present_absent=False, present_absent_delimiter_word=None):\n pred_str_2dlist = [] # a 2dlist, len(pred_str_2d_list)=batch_size, len(pred_str_2d_list[0])=\n for sample, oov, src_word_list in zip(sample_list, oov_lists, src_str_list):\n # sample['prediction']: list of 0-dim tensor, len=trg_len\n # sample['attention']: tensor with size [trg_len, src_len]\n word_list = prediction_to_sentence(sample['prediction'], idx2word, vocab_size, oov, eos_idx, unk_idx, replace_unk, src_word_list, sample['attention'])\n #print(sample['prediction'])\n 
#print(\"The word list is:\",word_list)\n pred_str_list = split_word_list_by_delimiter(word_list, delimiter_word, separate_present_absent, present_absent_delimiter_word)\n #pred_str_list = split_concated_keyphrases(word_list, delimiter_word)\n #print(\"The delimiter is:\",idx2word)\n #print(\"The word list is:\",pred_str_list)\n #sys.exit()\n pred_str_2dlist.append(pred_str_list)\n return pred_str_2dlist", "def mk_sql_list(ls):\n res = \"(\" + ' '.join([str(elem) for elem in intersperse(\",\", ls)]) + \")\"\n return res", "def disperse_string(solid_string):\r\n normal_list = list(solid_string)\r\n return list(itertools.chain.from_iterable(zip(normal_list, [0] * len(normal_list))))", "def transpose(input: str) -> str:\n # If there is no matrix then returns empty string\n # This evaluation has a complexity of O(1)\n if len(input) == 0:\n return ''\n\n # This split has a complexity of O(N)\n rows = input.split('\\n')\n\n # Save max row len to know how to fit\n # This has a complexity of O(N) too\n max_row_len = max(len(row) for row in rows)\n\n # Fix incomplete rows adding spaces until it completes the max_row_len\n # It has also a complexity of O(N)\n fixed_rows = [row + ' ' * (max_row_len - len(row)) for row in rows]\n\n transposed_rows = []\n\n # Iterates rows, with X = len(rows) and Y = max_row_len\n # I recognize this is not the best approach since this is O(N^2), but\n # I tried to do it with zip() with no success...\n # I think zip internally does a similar action in O(N^2)\n for j in range(max_row_len):\n transposed_row = ''\n spaces = ''\n for i in range(len(rows)):\n # This space validation avoids adding spaces to right, it takes\n # advantage of the current iteration to avoid another innecesarly\n # O(N^2) iteration\n if fixed_rows[i][j] != ' ':\n transposed_row += spaces + fixed_rows[i][j]\n spaces = ''\n else:\n spaces += ' '\n\n transposed_rows.append(transposed_row)\n\n return '\\n'.join(transposed_rows)", "def twoDize(array, width):\n count = 0\n output = []\n temp = []\n while len(array) > 0:\n temp.append(array.pop())\n if len(temp) == width:\n output.append(temp)\n temp = []\n return output", "def question_two():\n # [[][][]]\n x = [[]]*3\n #[[a],[a],[a]]\n x[0].append('a')\n #[[a, b],[a, b],[a, b]]\n x[1].append('b')\n #[[a, b, c],[a, b, c],[a, b, c]]\n x[2].append('c')\n #[[d],[a, b, c],[a, b, c]]\n x[0] = ['d']", "def convert_list(l):\r\n l = [list(elem) for elem in l]\r\n return l", "def print_row(l):\n if type(l) is list:\n return \" \".join(l)\n else:\n return l" ]
[ "0.66831225", "0.6467971", "0.6433592", "0.62176746", "0.61118764", "0.59531605", "0.59194076", "0.5904703", "0.5842135", "0.58245844", "0.5760106", "0.57579666", "0.573827", "0.5714012", "0.5692371", "0.569116", "0.56879634", "0.56724775", "0.56658834", "0.5663291", "0.56433153", "0.563448", "0.5630856", "0.5618246", "0.56127036", "0.55674875", "0.5523864", "0.5523632", "0.55214334", "0.55122393", "0.55092466", "0.5508736", "0.54984856", "0.54874027", "0.5457054", "0.54548055", "0.5454761", "0.54083014", "0.5394291", "0.5386792", "0.53641504", "0.5359174", "0.5353637", "0.53337884", "0.5332996", "0.5323078", "0.53202075", "0.53202075", "0.5317615", "0.5303488", "0.5302374", "0.5301117", "0.52819526", "0.52779233", "0.5269997", "0.5252647", "0.52231824", "0.5221832", "0.5217062", "0.521201", "0.52116084", "0.520322", "0.520308", "0.5201632", "0.51922", "0.51887214", "0.51873165", "0.51844114", "0.51752853", "0.51750124", "0.51742125", "0.5173219", "0.51687247", "0.5161773", "0.5158302", "0.51504374", "0.51476604", "0.51269203", "0.5126822", "0.51239073", "0.51104456", "0.5100493", "0.5090529", "0.5090529", "0.5083562", "0.507746", "0.50772387", "0.5070204", "0.5067708", "0.5063817", "0.5062376", "0.5055873", "0.50460815", "0.50447786", "0.5042201", "0.5040811", "0.50401855", "0.50387454", "0.5031688", "0.50226873" ]
0.576872
10
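For reference, a minimal runnable sketch of the 2D-to-1D flattening that the record above describes; the helper name `flatten_2d_rows` and the example values are illustrative assumptions, not part of the dataset record:

# A minimal sketch of the flattening routine from the record above,
# assuming plain Python lists as input (no NumPy dependency).
def flatten_2d_rows(two_d_list):
    """Append each subsequent row onto the end of the first, yielding a 1D list."""
    one_d_list = []
    for row in two_d_list:      # walk rows in order
        for item in row:        # append each element of the current row
            one_d_list.append(item)
    return one_d_list

if __name__ == "__main__":
    # Illustrative usage (values are assumptions, not taken from the dataset):
    print(flatten_2d_rows([[1, 2], [3, 4], [5]]))  # -> [1, 2, 3, 4, 5]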
Converts Penn Treebank tags to WordNet.
def penn2morphy(penntag):\n    morphy_tag = {'NN':'n', 'JJ':'a',\n                  'VB':'v', 'RB':'r'}\n    try:\n        return morphy_tag[penntag[:2]]\n    except:\n        return 'n'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def penn_to_wn(self,tag):\n if tag.startswith('N'):\n return 'n'\n \n if tag.startswith('V'):\n return 'v'\n \n if tag.startswith('J'):\n return 'a'\n \n if tag.startswith('R'):\n return 'r'\n \n return None", "def penn_to_wn(tag):\r\n if tag.startswith('N'):\r\n return 'n'\r\n \r\n if tag.startswith('V'):\r\n return 'v'\r\n \r\n if tag.startswith('J'):\r\n return 'a'\r\n \r\n if tag.startswith('R'):\r\n return 'r'\r\n \r\n return 'n'", "def penn_to_wn(tag):\r\n if tag.startswith('N'):\r\n return 'n'\r\n \r\n if tag.startswith('V'):\r\n return 'v'\r\n \r\n if tag.startswith('J'):\r\n return 'a'\r\n \r\n if tag.startswith('R'):\r\n return 'r'\r\n \r\n return 'n'", "def get_wordnet_pos(treebank_tag):\n\n if treebank_tag == 'NNP':\n return wordnet.NOUN, 'proper'\n\n # JJ-adjective\n # JJR-adjective, comparative\n # JJS-adjective, superlative\n elif treebank_tag.startswith('J'):\n return wordnet.ADJ, 'adj'\n\n # VB-verb, base form\n # VBD-verb, past tense\n # VBG-verb, gerund or present participle; VBN-verb, past participle\n # VBP-verb, non-3rd person singular present\n # VBZ-verb, 3rd person singular present\n elif treebank_tag.startswith('V'):\n return wordnet.VERB, 'verb'\n\n # RB-adverb\n # RBR-adverb, comparative\n # RBS-adverb, superlative\n # RP-particle\n elif treebank_tag.startswith('R'):\n return wordnet.ADV, 'adv'\n\n # NN-noun\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN, 'noun'\n\n # default\n else:\n return wordnet.NOUN, ''", "def penn_to_wn(self,tag):\n if tag.startswith('N'):\n return 'n'\n\n if tag.startswith('V'):\n return 'v'\n\n if tag.startswith('J'):\n return 'a'\n\n if tag.startswith('R'):\n return 'r'\n\n return None", "def ner_nltk(filepath):\n\n out = \"\"\n\n with codecs.open(filepath,'r','utf-8') as current_file:\n\n text = current_file.readlines()\n\n with codecs.open(filepath+\".ner\",'w','utf-8') as outfile:\n\n for line in text:\n\n tokenized = line.split()\n tagged = pos_tag(tokenized)\n ne = ne_chunk(tagged)\n\n for index,token in enumerate(ne):\n if type(token) != tuple:\n outfile.write(' '.join([tok[0]+\"|\"+token.label() for tok in token])+' ')\n else:\n outfile.write(token[0]+' ')\n outfile.write('\\n')", "def penn_to_wn(tag):\n if tag.startswith('N'):\n return 'n'\n \n if tag.startswith('V'):\n return 'v'\n \n if tag.startswith('J'):\n return 'a'\n \n if tag.startswith('R'):\n return 'r'\n \n return None", "def penn_to_wn(tag):\n if tag.startswith('N'): return 'n'\n if tag.startswith('V'): return 'v'\n if tag.startswith('J'): return 'a'\n if tag.startswith('R'): return 'r'\n return None", "def penn_to_wn(tag):\n if tag.startswith('N'):\n return 'n'\n if tag.startswith('V'):\n return 'v'\n if tag.startswith('J'):\n return 'a'\n if tag.startswith('R'):\n return 'r'\n return None", "def tag_to_wordnet(tag):\n if (tag == 'ADJ'): return('a')\n elif (tag == 'ADV'): return('r')\n elif (tag == 'NOUN'): return('n')\n elif (tag == 'VERB'): return('v')\n else: return None", "def penn_to_wn(tag):\n if tag.startswith('N'):\n return 'n'\n\n if tag.startswith('V'):\n return 'v'\n\n if tag.startswith('J'):\n return 'a'\n\n if tag.startswith('R'):\n return 'r'\n\n return None", "def get_wordnet_pos(wn, treebank_tag):\n\n if treebank_tag.startswith('J'):\n return wn.ADJ\n elif treebank_tag.startswith('V'):\n return wn.VERB\n elif treebank_tag.startswith('N'):\n return wn.NOUN\n elif treebank_tag.startswith('R'):\n return wn.ADV\n else:\n return wn.NOUN", "def penn_to_wn(tag):\n if tag.startswith('J'):\n return wn.ADJ\n elif tag.startswith('N'):\n return 
wn.NOUN\n elif tag.startswith('R'):\n return wn.ADV\n elif tag.startswith('V'):\n return wn.VERB\n return wn.NOUN # None", "def penn_to_wn(tag):\n if tag.startswith('J'):\n return wn.ADJ\n elif tag.startswith('N'):\n return wn.NOUN\n elif tag.startswith('R'):\n return wn.ADV\n elif tag.startswith('V'):\n return wn.VERB\n return None", "def get_wordnet_pos(treebank_tag):\n if(treebank_tag.startswith('J')):\n return wordnet.ADJ\n elif(treebank_tag.startswith('V')):\n return wordnet.VERB\n elif(treebank_tag.startswith('N')):\n return wordnet.NOUN\n elif(treebank_tag.startswith('R')):\n return wordnet.ADV\n else:\n return wordnet.NOUN", "def get_wordnet_pos(treebank_tag):\n\n if treebank_tag.startswith('J'):\n return wordnet.ADJ\n elif treebank_tag.startswith('V'):\n return wordnet.VERB\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN\n elif treebank_tag.startswith('R'):\n return wordnet.ADV\n else:\n return wordnet.NOUN", "def get_wordnet_pos(treebank_tag):\n if treebank_tag.startswith('J'):\n return wordnet.ADJ\n elif treebank_tag.startswith('V'):\n return wordnet.VERB\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN\n elif treebank_tag.startswith('R'):\n return wordnet.ADV\n elif treebank_tag.startswith('S'):\n return wordnet.ADJ\n else:\n return wordnet.NOUN", "def map_postags(treebank_tag):\n\n if treebank_tag.startswith('J'):\n return \"a\"\n elif treebank_tag.startswith('V'):\n return \"v\"\n elif treebank_tag.startswith('N'):\n return \"n\"\n elif treebank_tag.startswith('R'):\n return \"r\"\n else:\n return 'n'", "def get_wordnet_pos(treebank_tag):\n if treebank_tag.startswith('J'):\n return wordnet.ADJ\n elif treebank_tag.startswith('V'):\n return wordnet.VERB\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN\n elif treebank_tag.startswith('R'):\n return wordnet.ADV\n else:\n # As default pos in lemmatization is Noun\n return wordnet.NOUN", "def get_wordnet_pos(treebank_tag):\n if treebank_tag.startswith('J'):\n return wordnet.ADJ\n elif treebank_tag.startswith('V'):\n return wordnet.VERB\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN\n elif treebank_tag.startswith('R'):\n return wordnet.ADV\n else:\n return wordnet.NOUN # If unknown, return the default value", "def get_nouns(self):\n\t\tblob = TextBlob(self.raw_string)\n\n\t\tfor word, tag in blob.tags:\n\t\t\tif tag in ['NNP', 'NN']:\n\t\t\t\tself.nouns.append(word.lemmatize())", "def convert(tag):\r\n if is_noun(tag):\r\n return wn.NOUN\r\n if is_adjective(tag):\r\n return wn.ADJ", "def nodes_to_nags(self, nags):\n out = []\n for n in nags:\n out.append(n.text.strip(' '))\n return out", "def convert_pos_tag(tag):\n # Source: https://www.programcreek.com/python/example/91610/nltk.corpus.wordnet.NOUN\n if tag in ['JJ', 'JJR', 'JJS']:\n return ADJ\n elif tag in ['RB', 'RBR', 'RBS']:\n return ADV\n elif tag in ['NN', 'NNS', 'NNP', 'NNPS']:\n return NOUN\n elif tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']:\n return VERB\n return NOUN", "def create_tagger():\n train_sents = brown.tagged_sents()\n\n # These regexes were lifted from the NLTK book tagger chapter.\n t0 = nltk.RegexpTagger(\n [(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), # cardinal numbers\n (r'(The|the|A|a|An|an)$', 'AT'), # articles\n (r'.*able$', 'JJ'), # adjectives\n (r'.*ness$', 'NN'), # nouns formed from adjectives\n (r'.*ly$', 'RB'), # adverbs\n (r'.*s$', 'NNS'), # plural nouns\n (r'.*ing$', 'VBG'), # gerunds\n (r'.*ed$', 'VBD'), # past tense verbs\n (r'.*', 'NN') # nouns (default)\n ])\n t1 = nltk.UnigramTagger(train_sents, 
backoff=t0)\n t2 = nltk.BigramTagger(train_sents, backoff=t1)\n t3 = nltk.TrigramTagger(train_sents, backoff=t2)\n return t3", "def nltk_get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n # Convert NOTK to wordnet POS notations\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n return tag_dict.get(tag, wordnet.NOUN) # Default to noun if not found", "def pos_tag(sentence, model_path=None, verbose=False):\r\n \r\n tnt_bin = config_tnt(verbose=verbose)\r\n \r\n if not model_path:\r\n model_path = '%s/models/wsj' % tnt_bin[:-4]\r\n \r\n input_file = '%s/tnt_in.txt' % tnt_bin[:-4]\r\n output_file = '%s/tnt_out.txt' % tempfile.gettempdir()\r\n \r\n execute_string = '%s %s %s > %s'\r\n if not verbose:\r\n execute_string += ' 2> %s/tnt.out' % tempfile.gettempdir()\r\n \r\n tagged_words = []\r\n \r\n f = None\r\n try:\r\n if verbose: \r\n print 'Begin input file creation' \r\n print 'input_file=%s' % input_file\r\n\r\n f = open(input_file, 'w')\r\n words = tokenize.WhitespaceTokenizer().tokenize(sentence)\r\n for word in words:\r\n f.write('%s\\n' % word)\r\n f.write('\\n')\r\n f.close()\r\n if verbose: print 'End input file creation'\r\n \r\n if verbose:\r\n print 'tnt_bin=%s' % tnt_bin \r\n print 'model_path=%s' % model_path\r\n print 'output_file=%s' % output_file\r\n \r\n execute_string = execute_string % (tnt_bin, model_path, input_file, output_file)\r\n \r\n if verbose: \r\n print 'execute_string=%s' % execute_string\r\n \r\n if verbose: print 'Begin tagging'\r\n tnt_exit = os.system(execute_string)\r\n if verbose: print 'End tagging (exit code=%s)' % tnt_exit\r\n \r\n f = open(output_file, 'r')\r\n lines = f.readlines()\r\n f.close()\r\n\r\n tagged_words = []\r\n tokenizer = tokenize.WhitespaceTokenizer()\r\n for line in lines:\r\n if not line.startswith('%%'):\r\n tokens = tokenizer.tokenize(line.strip())\r\n if len(tokens) == 2:\r\n tagged_words.append((tokens[0], tokens[1]))\r\n \r\n if verbose:\r\n for tag in tagged_words:\r\n print tag\r\n\r\n finally:\r\n if f: f.close()\r\n\r\n return tagged_words", "def NOAD_to_wordnet(data):\r\n NOAD_to_wordnet = {}\r\n with open(algorithmic_map, 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n noad, wordnet = line.split()\r\n NOAD_to_wordnet[noad] = wordnet\r\n with open(manual_map, 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n noad, wordnet = line.split()\r\n NOAD_to_wordnet[noad] = wordnet\r\n \r\n count = 0\r\n for elem in data: \r\n if elem[\"is_target\"]:\r\n if elem[\"sense\"] not in NOAD_to_wordnet:\r\n count += 1\r\n continue\r\n noad_sense = elem[\"sense\"]\r\n elem[\"sense\"] = NOAD_to_wordnet[noad_sense]\r\n print(\"NOAD sense not in mapping text: %d\" %count)\r\n return data", "def tag_tnt(self, untagged_string: str):\n untagged_tokens = wordpunct_tokenize(untagged_string)\n tagger = self._load_model(\"tnt\")\n tagged_text = tagger.tag(untagged_tokens)\n return tagged_text", "def penn2morphy(self,penntag):\r\n morphy_tag = {'NN':'n', 'JJ':'a',\r\n 'VB':'v', 'RB':'r'}\r\n try:\r\n return morphy_tag[penntag[:2]]\r\n except:\r\n return 'n'", "def __tagsToNgrams__(self):\n bigrams = defaultdict(int)\n trigrams = defaultdict(int)\n for tags in self.getTags():\n tags = list(tags)\n for i in range(2):\n tags.insert(0, BEGIN)\n for k in range(2, len(tags)):\n trigrams[tuple(tags[k-2:k+1])] += 1\n bigrams[tuple(tags[k-1:k+1])] += 1\n return bigrams, trigrams", "def _get_wordnet_pos(self, tag):\n tag = tag[0].upper()\n \n if tag == \"J\":\n 
return wordnet.ADJ\n elif tag == \"N\":\n return wordnet.NOUN\n elif tag == \"V\":\n return wordnet.VERB\n elif tag == \"R\":\n return wordnet.ADV\n else:\n return wordnet.NOUN", "def pos_treebank(data_word):\n #returns dict\n w_pos_treebank = nltk.pos_tag(data_word)\n w_pos_treebank = dict(w_pos_treebank)\n return w_pos_treebank", "def word_tag(self, word):\n if word[1] in (\"NN\", \"NNS\", \"NNP\", \"NNPS\"):\n return _wordnet.NOUN\n if word[1] in (\"JJ\", \"JJR\", \"JJS\"):\n return _wordnet.ADJ\n if word[1] in (\"VB\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\"):\n return _wordnet.VERB\n if word[1] in (\"RB\", \"RBR\", \"RBS\"):\n return _wordnet.ADV\n\n return None", "def correctDefaultNounTag(idx, tagged_term, tagged_terms, lexicon):\n term, tag, norm = tagged_term\n if tag == 'NND':\n if term.endswith('s'):\n tagged_term[1] = 'NNS'\n tagged_term[2] = term[:-1]\n else:\n tagged_term[1] = 'NN'", "def _visualize_nltk(self):\n nltk_tree = self.as_nltk_tree()\n import nltk\n nltk.draw.tree.draw_trees(nltk_tree)", "def convert_to_t5_format(nlp, texts):\n\n inputs = []\n outputs = []\n original_texts = []\n\n for text, doc in zip(texts, nlp.pipe(texts, n_process=-1)):\n\n pairs = set()\n\n for chunk in doc.noun_chunks:\n if chunk.text == text:\n continue\n input_ = text[0 : chunk.start_char] + \"<extra_id_0> \" + text[chunk.end_char + 1 :]\n output = \"<extra_id_0> \" + chunk.text + \" <extra_id_1> </s>\"\n\n pairs.add((input_.strip(), output))\n\n for token in doc:\n left_edge_i = token.left_edge.i\n right_edge_i = token.right_edge.i\n chunk_length = right_edge_i - left_edge_i + 1\n if chunk_length / len(doc) > 0.5 or chunk_length > 10: # if chunk is too long, just skip it\n continue\n\n input_ = str(doc[:left_edge_i]) + \" <extra_id_0> \" + str(doc[right_edge_i + 1 :])\n output = \"<extra_id_0> \" + str(doc[left_edge_i : right_edge_i + 1]) + \" <extra_id_1> </s>\"\n\n pairs.add((input_.strip(), output))\n\n for token in doc:\n if token.pos_ in [\"NOUN\", \"PRON\", \"PROPN\"]: # we don't want to mask parts of noun chunks\n continue\n input_ = str(doc[: token.i]) + \" <extra_id_0> \" + str(doc[token.i + 1 :])\n output = \"<extra_id_0> \" + token.text + \" <extra_id_1> </s>\"\n\n pairs.add((input_.strip(), output))\n\n for (input_, output) in pairs:\n inputs.append(input_)\n outputs.append(output)\n original_texts.append(text)\n\n return inputs, outputs, original_texts", "def nltk_tree(self):\n return nltk_tree(self)", "def gettag(query, lemmatag = False):\n import re\n if lemmatag is False:\n tag = 'n' # same default as wordnet\n # attempt to find tag from tregex query\n tagfinder = re.compile(r'^[^A-Za-z]*([A-Za-z]*)')\n tagchecker = re.compile(r'^[A-Z]{1,4}$')\n treebank_tag = re.findall(tagfinder, query.replace(r'\\w', '').replace(r'\\s', '').replace(r'\\b', ''))\n if re.match(tagchecker, treebank_tag[0]):\n if treebank_tag[0].startswith('J'):\n tag = 'a'\n elif treebank_tag[0].startswith('V') or treebank_tag[0].startswith('M'):\n tag = 'v'\n elif treebank_tag[0].startswith('N'):\n tag = 'n'\n elif treebank_tag[0].startswith('R'):\n tag = 'r'\n elif lemmatag:\n tag = lemmatag\n tagchecker = re.compile(r'^[avrn]$')\n while not re.match(tagchecker, lemmatag):\n time = strftime(\"%H:%M:%S\", localtime())\n selection = raw_input('\\n%s: WordNet POS tag \"%s\" not recognised.\\n It must be:\\n\\n ' \\\n ' a: (adjective)' \\\n ' n: (noun)' \\\n ' r: (adverb)' \\\n ' v: (verb)\\n\\nYour selection: ' % (time, lemmatag))\n lemmatag = selection\n return tag", "def encode_tags(taglist, 
lang_name):\n tagvec = [None]*len(UNIMORPH_CATEGORIES)\n for tag in taglist:\n if tag in UNIMORPH_TAGTYPES:\n tagtype = UNIMORPH_TAGTYPES[tag]\n set_tagtype(tagvec, tagtype, tag, lang_name)\n\n if tag in UNIMORPH_POS_MAP:\n pos = UNIMORPH_POS_MAP[tag]\n set_tagtype(tagvec, \"POS\", pos, lang_name)\n\n return tagvec", "def create_network():\n net = ln.models.TinyYolo(CLASSES, CONF_THRESH, NMS_THRESH)\n\n net.load(args.weight)\n net.eval()\n net.postprocess.append(ln.data.transform.TensorToBrambox(NETWORK_SIZE, LABELS))\n net = net.to(device)\n return net", "def _preprocess(self, tagged: List[Tuple]) -> Tuple:\n ori = \" \".join([tag[0] for tag in tagged])\n tags = [tag[1] for tag in tagged]\n # Mapping into general tagset\n tags = [self._map[tag] if tag in self._map else \"X\" for tag in tags]\n return \" \".join(tags), ori", "def convert_nkjp(nkjp_path, output_dir):\n # Load XML NKJP\n print(\"Reading data from %s\" % nkjp_path)\n if os.path.isfile(nkjp_path) and (nkjp_path.endswith(\".tar.gz\") or nkjp_path.endswith(\".tgz\")):\n with tempfile.TemporaryDirectory() as nkjp_dir:\n print(\"Temporarily extracting %s to %s\" % (nkjp_path, nkjp_dir))\n with tarfile.open(nkjp_path, \"r:gz\") as tar_in:\n tar_in.extractall(nkjp_dir)\n\n subfolder_to_entities = load_xml_nkjp(nkjp_dir)\n elif os.path.isdir(nkjp_path):\n subfolder_to_entities = load_xml_nkjp(nkjp_path)\n else:\n raise FileNotFoundError(\"Cannot find either unpacked dataset or gzipped file\")\n converted = []\n for subfolder_name, pars in subfolder_to_entities.items():\n for par_id, par in pars.items():\n paragraph_identifier = f\"{subfolder_name}|{par_id}\"\n par_tokens = []\n for _, sent in par.items():\n tokens = sent.values()\n srt = sorted(tokens, key=lambda tok:tok[\"i\"])\n for token in srt:\n _ = token.pop(\"i\")\n _ = token.pop(\"seg_id\")\n par_tokens.append(token)\n par_tokens[0][\"paragraph_id\"] = paragraph_identifier\n converted.append(par_tokens)\n\n split = split_dataset(converted)\n\n for split_name, split in split.items():\n if split:\n with open(os.path.join(output_dir, f\"pl_nkjp.{split_name}.json\"), \"w\", encoding=\"utf-8\") as f:\n json.dump(split, f, ensure_ascii=False, indent=2)", "def extract_labels(filename,tag,one_hot):\n print('Extracting labels',filename)\n return extractdb_labels(filename,tag,one_hot=one_hot)", "def nlp_extract_tags(text, lang=None):\n\n sentences = nltk.sent_tokenize(text)\n tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]\n tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]\n chunked_sentences = nltk.batch_ne_chunk(tagged_sentences, binary=True)\n\n def extract_entity_names(t):\n entity_names = []\n\n if hasattr(t, 'node') and t.node:\n if t.node == 'NE':\n entity_names.append(' '.join([child[0] for child in t]))\n else:\n for child in t:\n entity_names.extend(extract_entity_names(child))\n\n return entity_names\n\n entity_names = []\n for tree in chunked_sentences:\n entity_names.extend(extract_entity_names(tree))\n\n result = {'tags': list(set(entity_names))}\n\n return jsonp({'status': 'ok', 'result': result})", "def _convert_tags_to_wordpiece_tags(tags: List[str], offsets: List[int]) -> List[str]:\n new_tags = []\n j = 0\n for i, offset in enumerate(offsets):\n tag = tags[i]\n is_o = tag == \"O\"\n is_start = True\n while j < offset:\n if is_o:\n new_tags.append(\"O\")\n\n elif tag.startswith(\"I\"):\n new_tags.append(tag)\n\n elif is_start and tag.startswith(\"B\"):\n new_tags.append(tag)\n is_start = False\n\n elif 
tag.startswith(\"B\"):\n _, label = tag.split(\"-\", 1)\n new_tags.append(\"I-\" + label)\n j += 1\n\n # Add O tags for cls and sep tokens.\n return [\"O\"] + new_tags + [\"O\"]", "def nltk_tree(sentence):\n from nltk import tree\n def do_pnp(pnp):\n # Returns the PNPChunk (and the contained Chunk objects) in NLTK bracket format.\n s = ' '.join([do_chunk(ch) for ch in pnp.chunks])\n return '(PNP %s)' % s\n \n def do_chunk(ch):\n # Returns the Chunk in NLTK bracket format. Recurse attached PNP's.\n s = ' '.join(['(%s %s)' % (w.pos, w.string) for w in ch.words])\n s+= ' '.join([do_pnp(pnp) for pnp in ch.attachments])\n return '(%s %s)' % (ch.type, s)\n \n T = ['(S']\n v = [] # PNP's already visited.\n for ch in sentence.chunked():\n if not ch.pnp and isinstance(ch, Chink):\n T.append('(%s %s)' % (ch.words[0].pos, ch.words[0].string))\n elif not ch.pnp:\n T.append(do_chunk(ch))\n #elif ch.pnp not in v:\n elif ch.pnp.anchor is None and ch.pnp not in v:\n # The chunk is part of a PNP without an anchor.\n T.append(do_pnp(ch.pnp))\n v.append(ch.pnp)\n T.append(')')\n return tree.bracket_parse(' '.join(T))", "def convert(tree) :\n kind = tree[0]\n\n if kind == \"dot\" :\n return \"dot\" \n elif kind == \"eol\" :\n return \"eol\"\n elif kind == \"char\" :\n return \"lit('\" + tree[1] + \"')\"\n elif kind == \"set\" :\n return \"oneof('\" + tree[1] + \"')\"\n elif kind == \"elem\" :\n if len(tree) >= 3 :\n return convert(tree[2]) \n else :\n return convert(tree[1])\n elif kind == \"basic\" :\n if len(tree) == 4 :\n return \"alt(\" + convert(tree[1]) + \",\" + convert(tree[3]) + \")\"\n elif len(tree) == 3 :\n return parse_single_op_string(tree[2]) + convert(tree[1]) + \")\"*len(tree[2])\n else :\n return convert(tree[1])\n elif kind == \"RE\" :\n if len(tree) == 3 and tree[2][1][0] != 'eol' :\n return \"seq(\" + convert(tree[1]) + \",\" + convert(tree[2]) + \")\"\n else :\n return convert(tree[1])\n else :\n print \"invalid node tag : {}\".format(kind)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n return tag_dict.get(tag, wordnet.NOUN)", "def extract_english_raw_texts():\n # conceptnet triples raw text\n cpnet_en_raw_text = []\n\n # conceptnet entity context\n cpnet_en_entity_context = []\n\n with open(conceptnet_path, encoding=\"utf8\") as f:\n for line in f.readlines():\n ls = line.split('\\t')\n if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'):\n \"\"\"\n Some preprocessing:\n - Remove part-of-speech encoding.\n - Split(\"/\")[-1] to trim the \"/c/en/\" and just get the entity name, convert all to \n - Lowercase for uniformity.\n \"\"\"\n rel = ls[1].split(\"/\")[-1].lower()\n head = del_pos(ls[2]).split(\"/\")[-1].lower()\n tail = del_pos(ls[3]).split(\"/\")[-1].lower()\n\n if not head.replace(\"_\", \"\").replace(\"-\", \"\").isalpha():\n continue\n\n if not tail.replace(\"_\", \"\").replace(\"-\", \"\").isalpha():\n continue\n\n # transfer to raw text\n head_text = head.replace(\"_\", \" \")\n tail_text = tail.replace(\"_\", \" \")\n\n if rel not in relation_mapping:\n continue\n\n rel_text = relation_mapping[rel]\n raw_text = head_text + \" \" + rel_text + \" \" + tail_text\n cpnet_en_raw_text.append(raw_text)\n \n # split to train and test\n shuffle(cpnet_en_raw_text)\n train_size = int(len(cpnet_en_raw_text) * 0.9)\n cpnet_en_raw_train_text = cpnet_en_raw_text[:train_size]\n cpnet_en_raw_dev_text = cpnet_en_raw_text[train_size:]\n\n with 
open(conceptnet_en_raw_text_train_path, \"w\", encoding=\"utf8\") as f:\n f.write(\"\\n\".join(cpnet_en_raw_train_text))\n \n with open(conceptnet_en_raw_text_dev_path, \"w\", encoding=\"utf8\") as f:\n f.write(\"\\n\".join(cpnet_en_raw_dev_text))", "def get_wordnet_pos(word: str) -> Dict[str, Any]:\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def _convert_tags_to_wordpiece_tags(tags: List[str], offsets: List[int]) -> List[str]:\r\n new_tags = []\r\n j = 0\r\n for i, offset in enumerate(offsets):\r\n tag = tags[i]\r\n is_o = tag == \"O\"\r\n is_start = True\r\n while j < offset:\r\n if is_o:\r\n new_tags.append(\"O\")\r\n\r\n elif tag.startswith(\"I\"):\r\n new_tags.append(tag)\r\n\r\n elif is_start and tag.startswith(\"B\"):\r\n new_tags.append(tag)\r\n is_start = False\r\n\r\n elif tag.startswith(\"B\"):\r\n _, label = tag.split(\"-\", 1)\r\n new_tags.append(\"I-\" + label)\r\n j += 1\r\n\r\n # Add O tags for cls and sep tokens.\r\n return [\"O\"] + new_tags + [\"O\"]", "def get_tags_for_NOx_HONO(AllTags=False):\n diags = [\n # Version 6 tags\n 'ProdHNO2fromHvNIT', 'ProdHNO2fromHvNITs', 'ProdHNO2fromHvNITD1',\n 'ProdHNO2fromHvNITD2', 'ProdHNO2fromHvNITD3', 'ProdHNO2fromHvNITD4',\n 'ProdNO2fromHvNIT', 'ProdNO2fromHvNITs', 'ProdNO2fromHvNITD1',\n 'ProdNO2fromHvNITD2', 'ProdNO2fromHvNITD3', 'ProdNO2fromHvNITD4',\n 'ProdNO2fromHONO', 'ProdHNO2fromOHandNO', 'ProdHNO2fromHET',\n 'ProdNOnHO2ChannelA', 'ProdNOnHO2ChannelB',\n # Version 7 tags\n 'ProdHNO3fromNO2nOH','ProdNO3fromHNO3nOH',\n 'PhotNO2', 'PhotHNO3', 'PhotHNO2',\n 'ProdHNO3fromHetNO3', 'ProdNITfromHetNO3','ProdNITsfromHetNO3',\n ]\n prefix = 'TN{:0>3}'\n tags = [prefix.format(i+1) for i in range(len(diags))]\n # pair up numbering (so that runs with different diagnostics have same #s)?\n d = dict(zip(diags, tags))\n # Include the automatic tagging of NOx\n def mk_KPP_tag_from_rxn_str(rxn_str=None, search_str=None,\n prefix='ProdfromRXN', ):\n \"\"\"\n Create a variable for reaction\n \"\"\"\n reactants = rxn_str.split('=')[0]\n reactants = reactants.replace(' + ', '_n_')\n reactants = reactants.replace(' {+M} ', '_M_').strip()\n products = rxn_str.split('=')[-1]\n products = products.replace(' + ', '_n_')\n products = products.replace(' {+M} ', '_M_').strip()\n products = products.replace(' {+M}', '_M').strip()\n products = products[:10]\n # Return a new reaction string\n return'{}_{}_{}_to_{}'.format(prefix, search_str, reactants, products)\n\n if AllTags:\n DataRoot = get_local_folder('DataRoot')\n folder = '{}{}'.format(DataRoot, '/ARNA/Misc/')\n# FName = 'Tagged_reactions_in_Standard_v12.9.1_ARNA_v8_POx_tagged.csv'\n FName = 'Tagged_reactions_in_Standard_v12.9_ARNA_v9_PL_NOx_tagged.csv'\n df = pd.read_csv(folder+FName)\n# df['RxnName'] = df['rxn_str'].map(mk_KPP_tag_from_rxn_str)\n df['RxnName'] = df.apply(lambda x:\n mk_KPP_tag_from_rxn_str(rxn_str=x['rxn_str'],\n search_str = x['search_str'], ),\n axis=1)\n\n # combine into main dictionary\n d2 = dict(zip( df['RxnName'], df['tag'].values ) )\n d = AC.merge_two_dicts(d, d2)\n return d", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n 
\"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(self, word):\r\n tag = nltk.pos_tag([word])[0][1][0].upper()\r\n tag_dict = {\r\n \"J\": wordnet.ADJ,\r\n \"N\": wordnet.NOUN,\r\n \"V\": wordnet.VERB,\r\n \"R\": wordnet.ADV,\r\n }\r\n\r\n return tag_dict.get(tag, wordnet.NOUN)", "def as_nltk_tree(self):\n from .Utility import get_nltk_tree_reader_maybe\n read_nltk_tree = get_nltk_tree_reader_maybe()\n if not read_nltk_tree:\n raise ImportError(\"Unable to import nltk tree reading.\")\n nltk_tree = read_nltk_tree(str(self))\n return nltk_tree", "def _get_norm_tags(self, tags):\n norm_tags = []\n for tag in tags:\n lang = tag[0:2]\n norm_tags.append(lang + ':' + self.tag_manager.normalize_tag_wtokenization(tag, self.tries[lang]))\n return norm_tags", "def canonicalize(tags):\n # test format \n r = random.randint(0, len(tags)-1)\n\n # in multilabel format? each tag is in the form of [e1, e2, ...]\n isMultiLabel = True if hasattr(tags[r], '__iter__') else False\n\n if isMultiLabel: # i.e. 
each label is a list\n print('TDocTag.canonicalize> input labels in multilabel format.')\n docTags = []\n for i, tag in enumerate(tags): \n \n # docId = TDocTag.getDocID(i)\n docId = i # set docId here\n if tag[0] == docId: \n # do nothing, first element is already the intended docId\n pass \n else: \n tag.insert(0, docId)\n docTags.append(tag)\n else: \n docTags = []\n for i, tag in enumerate(tags): \n if i < 3: assert isinstance(tag, str)\n docId = i # docId = TDocTag.getDocID(i) \n docTags.append([docId, tag, ]) \n return docTags", "def filter_tree(tree):\n return find_noun(tree)\n\n # noun_phrase = re.match(\"NP|WHNP\", tree.parent().label())\n # noun = re.match(\"NN.*\", tree.label())\n # return noun_phrase and noun", "def xml2tokens(xml_tagged_sent, tokenized_sent, raw_sent):\n raw, entities = get_entities(xml_tagged_sent)\n if re.search(r\"ENAMEX\", raw):\n print(xml_tagged_sent)\n print(raw)\n # count += 1\n\n tokens, syllables = word_tokenize(tokenized_sent, raw_sent)\n level1_syl_tags = [\"O\" for i in range(len(syllables))]\n level2_syl_tags = [\"O\" for i in range(len(syllables))]\n level3_syl_tags = [\"O\" for i in range(len(syllables))]\n\n level1_token_tags = [\"O\" for i in range(len(tokens))]\n level2_token_tags = [\"O\" for i in range(len(tokens))]\n level3_token_tags = [\"O\" for i in range(len(tokens))]\n\n flag = False\n for entity in entities:\n value = entity[\"value\"]\n start = entity[\"start\"]\n end = entity[\"end\"]\n entity_type = entity[\"type\"]\n start_syl_id, end_syl_id = find_syl_index(start, end, syllables)\n start_tok_id, end_tok_id = find_tok_index(start_syl_id, end_syl_id, tokens)\n\n if start_syl_id != None and end_syl_id != None:\n if entity[\"level\"] == 1:\n level1_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level1_syl_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level2_syl_tags[i] = \"I-\" + entity_type\n else:\n level3_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level3_syl_tags[i] = \"I-\" + entity_type\n else:\n print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start,end,value,raw,xml_tagged_sent))\n flag = True\n\n if start_tok_id != None and end_tok_id != None:\n if entity[\"level\"] == 1:\n level1_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id+1, end_tok_id):\n level1_token_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level2_token_tags[i] = \"I-\" + entity_type\n else:\n level3_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level3_token_tags[i] = \"I-\" + entity_type\n else:\n pass\n # print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start_syl_id, end_syl_id, value, raw, xml_tagged_sent))\n\n ret_syllables = list(zip([ s.text for s in syllables], level1_syl_tags, level2_syl_tags, level3_syl_tags))\n ret_tokens = list(zip( [tk.text for tk in tokens], level1_token_tags, level2_token_tags, level3_token_tags))\n return ret_syllables, ret_tokens, raw, flag", "def __init__(self, original, no_ne_label = 'O', \n remove_bio_encoding = False):\n encoded_labels = []\n if not remove_bio_encoding:\n for label in ner_tags:\n encoded_labels.append(\"B-\" + label)\n encoded_labels.append(\"I-\" + label)\n else:\n encoded_labels = 
ner_tags\n\n self.original = original\n self.word = original\n self.label = no_ne_label\n if \"/\" in original:\n pos = original.rfind(\"/\")\n end = original[pos+1:]\n # remove parts of BIO encoding, e.g. remove \"B-\" from \"B-PER\" or \"I-\" from \"I-PER\"\n if remove_bio_encoding:\n end = end.replace(\"B-\", \"\").replace(\"I-\", \"\")\n if end in encoded_labels:\n self.word = original[0:pos]\n self.label = end\n #self._word_ascii = None\n self.feature_values = None", "def buildBTree(T, file):\r\n for line in file:\r\n word_line = line.split(' ')\r\n word = word_line[0]\r\n embedding = word_line[1:]\r\n embedding = [float(i) for i in embedding]\r\n if word[0].isalpha():\r\n word_emb_object = WordEmbedding.WordEmbedding(word, embedding)\r\n insertElement(T, word_emb_object)\r\n return T", "def fetch_the_corpora_using_NLTK():\n corpous_name = \"brown\"\n status = nltk.download(corpous_name)\n if (status):\n logging.info(\"Downloaded Brown corpus\")\n mdetok = TreebankWordDetokenizer()\n brown_natural = [mdetok.detokenize(' '.join(sent).replace('``', '\"').replace(\"''\", '\"').replace('`', \"'\").split()) for sent in brown.sents()]\n logging.info(\"Processed Brown corpus as text\")\n else:\n logging.error(\"Couldn't download the \"+ corpous_name+\" corpus\")\n \n return brown_natural", "def preprocess(docs):\r\n # stop = set(stopwords.words('english'))\r\n tags = {'NN', 'NNS', 'NNP', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'}\r\n for i in range(len(docs)):\r\n docs[i] = [(word.lower(), convert(tag)) for (word, tag) in nltk.pos_tag(nltk.word_tokenize(docs[i])) if tag in tags]\r\n return lemmatize_docs(docs)", "def get_nnp_ngrams(original_text, highlight=4, minsize=0):\n minsize = minsize-1\n if minsize<0:\n minsize = 0 \n \n tokens = nltk.wordpunct_tokenize(original_text)\n tagged = nltk.word_tokenize(original_text)\n tagged = nltk.pos_tag(tokens)\n #for word in tagged:\n # print word\n doc_length = len(tokens)\n counter = 0\n counter2 = 0\n if highlight==0:\n concated_test = doc_length # This is set to doc_length but could be anything recommend 3.\n else:\n concated_test = highlight\n list_of_NNPs = []\n while counter < (doc_length-1):\n while counter2 < concated_test:\n counter2 = counter2+1\n counter3 = 0\n #print '--------------------'\n temp_array = []\n all_nnp = True\n while counter3 < counter2:\n if counter < (doc_length-counter3):\n #print tokens[counter+counter3],tagged[counter+counter3][1]\n temp_array.append(tokens[counter+counter3])\n if tagged[counter+counter3][1] != 'NNP':\n all_nnp = False\n counter3 = counter3+1\n counter3 = 0\n if all_nnp == True:\n if(len(temp_array)>minsize):\n list_of_NNPs.append(temp_array)\n #print 'added to main array'\n #else:\n #print 'not all NNPs'\n counter2 = 0\n counter = counter+1\n #for ngram in list_of_NNPs:\n # print ngram\n import itertools\n list_of_NNPs.sort()\n unique_NNPs = list(list_of_NNPs for list_of_NNPs,_ in itertools.groupby(list_of_NNPs))\n return unique_NNPs", "def convertVBtoJJ(POS_tag, vb_docs):\n wanted_POS = ['VBN', 'VBD','VBG']\n for i, word in enumerate(POS_tag):\n if word[1] in wanted_POS:\n if vb_docs.loc[vb_docs[word[1]] == word[0], 'JJ'] is not None:\n sub_vb = vb_docs.loc[vb_docs[word[1]] == word[0], 'JJ']\n if len(sub_vb) > 0:\n POS_tag[i] = (sub_vb.get_values()[0], 'JJ')\n return POS_tag", "def fit_nltk(self, X):\n self.clf_nltk = nltk.NaiveBayesClassifier.train(X)", "def tag_perceptron(self, untagged_string: str):\n untagged_tokens = wordpunct_tokenize(untagged_string)\n tagger = self._load_model(\"perceptron\")\n 
tagged_text = tagger.tag(untagged_tokens)\n return tagged_text", "def normalize_word(self, word, treebank_tag):\n wordnet_pos, part_of_speech = self.get_wordnet_pos(treebank_tag)\n\n if wordnet_pos == wordnet.NOUN and part_of_speech == 'proper':\n return word, 'proper_noun'\n\n lemword = self.wordnetlemmatize.lemmatize(word, wordnet_pos)\n return self.stemmer.stem(lemword), part_of_speech", "def sent2words(sent):\n return pos_tag(word_tokenize(sent))", "def word_to_ngrams(self, word):\n encoding = list()\n n = self.n\n if word == self.eos or word == self.sos:\n encoding.append(self.ngram_to_id[word])\n else:\n _word = '^' + word + '$'\n for i in range(len(_word) - n + 1):\n ngram = _word[i:i + n]\n if ngram in self.ngram_to_id:\n encoding.append(self.ngram_to_id[ngram])\n else:\n for ch in ngram:\n flag = 1\n if ch in self.unk_char_list:\n flag = random.randint(0, 1)\n if ch in self.ngram_to_id and flag == 1:\n encoding.append(self.ngram_to_id[ch])\n else:\n encoding.append(self.ngram_to_id['<unk>'])\n return encoding", "def get_nouns(root):\n nouns = []\n for child in root.findall(\"./xdrs/taggedtokens/tagtoken/tags\"):\n noun = False\n for grandchildren in child.findall(\"./tag[@type='pos']\"):\n if grandchildren.text == 'NN' or grandchildren.text == 'NNS':\n noun = True\n if noun == True:\n for grandchildren in child.findall(\"./tag[@type='lemma']\"):\n nouns.append(grandchildren.text)\n return nouns", "def retrieve_wordnet():\n try:\n from nltk.corpus import wordnet\n except:\n import nltk\n nltk.download('wordnet')\n from nltk.corpus import wordnet\n\n return wordnet", "def pos_tag(\n words: List[str], engine: str = \"perceptron\", corpus: str = \"orchid\"\n) -> List[Tuple[str, str]]:\n _corpus = corpus\n _tag = []\n if corpus == \"orchid_ud\":\n corpus = \"orchid\"\n if not words:\n return []\n\n if engine == \"perceptron\":\n from .perceptron import tag as tag_\n elif engine == \"artagger\":\n tag_ = _artagger_tag\n else: # default, use \"unigram\" (\"old\") engine\n from .unigram import tag as tag_\n _tag = tag_(words, corpus=corpus)\n\n if _corpus == \"orchid_ud\":\n _tag = _orchid_to_ud(_tag)\n\n return _tag", "def normalize(\n self,\n text: str,\n n_tagged: int,\n punct_pre_process: bool = True,\n punct_post_process: bool = True,\n verbose: bool = False,\n ) -> str:\n if punct_pre_process:\n text = pre_process(text)\n text = text.strip()\n if not text:\n if verbose:\n print(text)\n return text\n\n text = pynini.escape(text)\n if n_tagged == -1:\n tagged_texts = rewrite.rewrites(text, self.tagger.fst)\n else:\n tagged_texts = rewrite.top_rewrites(text, self.tagger.fst, nshortest=n_tagged)\n\n if self.lang == 'en':\n normalized_texts = tagged_texts\n else:\n normalized_texts = []\n for tagged_text in tagged_texts:\n self._verbalize(tagged_text, normalized_texts)\n\n if len(normalized_texts) == 0:\n raise ValueError()\n if punct_post_process:\n normalized_texts = [post_process_punctuation(t) for t in normalized_texts]\n normalized_texts = set(normalized_texts)\n return normalized_texts", "def _from_etree_to_tree(self, lang='en-US'):\n #clear existing tree\n# for i in self.tree.get_children():\n# self.tree.delete(i)\n self.tree.delete(*self.tree.get_children())\n #now insert old tree\n for category in self.trout:\n tagged = category.get('tags')\n if tagged is None:\n tagged = \"('{}',)\".format(category.tag)\n if tagged[-1] == ')':\n inserttext = tagged[2:3].upper() + tagged[3:tagged.find(')')-2]\n else:\n inserttext = tagged[1:2].upper() + tagged[2:-1]\n 
#messagebox.showwarning('_from_etree_to_tree', \"{}, {}\".format(lang, inserttext))\n thiscategory = self.tree.insert('', 'end', iid=inserttext.lower(), values=['', ''], \\\n text=LOCALIZED_TEXT[lang][inserttext], tags=\"{}\".format(inserttext.lower()))\n for term in category:\n values = eval(term.get('values'))\n tags = term.get('tags')\n# messagebox.showwarning('_from_etree_to_tree', \"{}, {}\".format(values, tags))\n thisterm = self.tree.insert(thiscategory, 'end')\n self.tree.item(thisterm, tags=term.get('tags'))\n self.tree.item(thisterm, text=term.text)\n self.tree.item(thisterm, values=[str(values[0]), str(values[1])])\n# tags=term.get('tags'))\n for rendering in term:\n thisrendering = self.tree.insert(thisterm, 'end', \\\n text=rendering.text, values=term.get('values'), \\\n tags=rendering.get('tags'))\n self.tree.tag_configure('approved', background='palegreen')\n self.tree.tag_configure('conflict', background='bisque')\n self.tree.tag_configure('suggestions', background='lightblue')\n self.tree.tag_configure('unknown', background='whitesmoke')\n self.tree.tag_configure('cldr', background='violet')\n self.tree.update() \n pass", "def get_wordnet_pos(tag):\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag.upper(), wordnet.NOUN)", "def convert_int_data(lines):\n tagdict = load('help/tagsets/upenn_tagset.pickle')\n list_tags = list(tagdict.keys()) # Get the list of all the tags.\n X, Y = [], [] # Creation of the array\n for j in range(len(lines)):\n line = lines[j]\n if len(line) >= 5: # We want the word in the middle of five words\n index = np.random.random_integers(low=2, high=len(line) - 3) # Take the index of the word to be choosen\n neighbours_words = [line[i] for i in (index - 2, index - 1, index + 1, index + 2)] # Extract the words\n Y.append(one_hot_encoding(lines[j][index], list_tags)) # Append the target to the array\n sample = []\n for word in neighbours_words:\n sample.append(one_hot_encoding(word, list_tags).tolist())\n X.append(sample) # Append the 4 neighbouring words\n\n return np.array(X), np.array(Y)", "def petit_nettoyage(ligne, lem_v=True, lem_n=True, len_elt=2, stopw=[]):\n lemmatizer = WordNetLemmatizer()\n for elt in ligne:\n if elt in (string.punctuation + string.digits):\n ligne = ligne.replace(elt, \" \")\n if lem_v and lem_n:\n liste = [\n lemmatizer.lemmatize(elt, pos=\"v\")\n for elt in ligne.split()\n if lemmatizer.lemmatize(elt, pos=\"v\") not in stopw\n ]\n liste = [\n lemmatizer.lemmatize(elt, pos=\"n\")\n for elt in liste\n if len(lemmatizer.lemmatize(elt, pos=\"n\")) > len_elt\n ]\n elif lem_v and lem_n:\n liste = [\n lemmatizer.lemmatize(elt, pos=\"v\")\n for elt in ligne.split()\n if (lemmatizer.lemmatize(elt, pos=\"v\") not in stopw)\n and (len(elt) > len_elt)\n ]\n elif lem_v and lem_n:\n liste = [\n lemmatizer.lemmatize(elt, pos=\"n\")\n for elt in ligne.split()\n if (lemmatizer.lemmatize(elt, pos=\"n\") not in stopw)\n and (len(elt) > len_elt)\n ]\n else:\n liste = [\n elt\n for elt in ligne.split()\n if (elt not in stopw) and (len(elt) > len_elt)\n ]\n ligne = \" \".join(liste)\n return ligne", "def load_conll_notags(unfile, max_slen, vocab=[], oovs={}, pads={}, lower=False, mwe=True, unk_case=True):\n # special characters used for splitting words\n split_chars = set([',', '.', ':', '-', '~', \"'\", '\"'])\n\n # punctuation that denotes when a sentence finishes\n sent_split_words = set(['.', '?', '!', ';', '—'])\n\n input_sents = []\n input_words = []\n windex = 
-1\n\n # number of words from which to split sentences\n LIMIT_SENT_LEN = max_slen\n\n sents = []\n if 'begin' in pads:\n next_words = [pads['begin']]\n next_syms = ['']\n next_indexs = [windex]\n sent_base_length = 1\n else:\n next_words = []\n next_syms = []\n next_indexs = []\n sent_base_length = 0\n\n # select files to use\n input_files = [unfile]\n\n # counters\n num_raw_sents = 0\n num_sents = 0\n num_words = 0\n num_oovs = 0\n\n # iterate over lines in the input files\n for ifile in input_files:\n for line in codecs.open(ifile, mode = 'r', errors = 'ignore', encoding = 'utf-8'):\n # discard newline character\n line = line[:-1]\n\n # keep adding words while in the middle of a sentence\n if line:\n word = line.split('\\t')[0]\n sym = word\n # add new original word\n windex += 1\n input_words.append(word)\n num_words += 1\n # lowercase when indicated\n if lower:\n word = word.lower()\n # use an heuristic and try to map oov words\n if vocab and word not in vocab:\n if word not in split_chars:\n if re.match('^[0-9\\.\\,-]+$', word):\n word = oovs['number']\n elif _match_word_vocab(word, vocab) != word:\n word = _match_word_vocab(word, vocab)\n elif ' ' in word or '~' in word or '-' in word and mwe:\n # attempt to split multi-word expressions\n constituents_text = re.split('[\\s~ | \\s-]+', word)\n constituents = [_match_word_vocab(w, vocab) for w in constituents_text]\n if all([True if c in vocab else False for c in constituents]):\n next_words += constituents[:-1]\n next_syms += constituents[:-1]\n next_indexs += [windex] * len(constituents[:-1])\n word = constituents[-1]\n sym = constituents[-1]\n else:\n if unk_case and word[0].isupper():\n word = oovs['UNKNOWN']\n else:\n word = oovs['unknown']\n num_oovs += 1\n else:\n if unk_case and word[0].isupper():\n word = oovs['UNKNOWN']\n else:\n word = oovs['unknown']\n num_oovs += 1\n else:\n word = oovs['unknown']\n num_oovs += 1\n\n next_words.append(word)\n next_syms.append(sym)\n next_indexs.append(windex)\n\n # stack the current sentence upon seeing an empty line or a sentence end mark\n if not line or (len(next_words) > 3 and next_words[-4] in sent_split_words) or (len(next_words) >= LIMIT_SENT_LEN and len(sent_split_words.intersection(next_words)) < 1):\n if len(next_words) > sent_base_length:\n # split when an empty line marks a sentence end\n if not line:\n if 'end' in pads:\n next_words.append(pads['end'])\n next_syms.append('')\n next_indexs.append(-1)\n sents.append(list(zip(next_words, next_indexs, next_syms)))\n input_sents.append(input_words)\n input_words = []\n windex = -1\n next_words = []\n next_syms = []\n next_indexs = []\n num_raw_sents += 1\n num_sents += 1\n # split when punctuation marks a sentence end\n elif len(next_words) > 3 and next_words[-4] in sent_split_words:\n split_words = next_words[:-3]\n split_syms = next_syms[:-3]\n split_indexs = next_indexs[:-3]\n if 'end' in pads:\n split_words.append(pads['end'])\n split_syms.append('')\n split_indexs.append(-1)\n sents.append(list(zip(split_words, split_indexs, split_syms)))\n next_words = next_words[-3:]\n next_syms = next_syms[-3:]\n next_indexs = next_indexs[-3:]\n num_sents += 1\n # split when the maximum sentence length is reached\n # a bad guess is better than not guessing when predicting tags\n else:\n sents.append(list(zip(next_words, next_indexs, next_syms)))\n next_words = []\n next_syms = []\n next_indexs = []\n num_sents += 1\n\n if 'begin' in pads:\n next_words = [pads['begin']] + next_words\n next_syms = [''] + next_syms\n next_indexs = [-1] + 
next_indexs\n\n else:\n input_sents.append(input_words)\n input_words = []\n windex = -1\n num_raw_sents += 1\n\n # double check the last sentence\n if len(next_words) > sent_base_length:\n if 'end' in pads:\n next_words.append(pads['end'])\n next_syms.append('')\n next_indexs.append(-1)\n sents.append(list(zip(next_words, next_indexs, next_syms)))\n input_sents.append(input_words)\n input_words = []\n windex = -1\n num_raw_sents += 1\n num_sents += 1\n\n # find the allowed sentence length\n print('[INFO] Number of unlabelled OOV words: ' + str(num_oovs) + ' / ' + str(num_words))\n print('[INFO] Original number of unlabelled sentences: ' + str(num_raw_sents))\n print('[INFO] Number of extracted unlabelled sentences ' + str(num_sents))\n return input_sents, sents", "def split_corpus_tags(self, corpus):\n logging.info('Dividindo texto das tags')\n sentences = []\n tags = []\n dict_tags = {}\n for sentence in corpus:\n sentence_tmp = sentence.replace(\"\\n\", '')\n words_tmp = []\n tags_tmp = []\n words = sentence_tmp.split(\" \")\n for word in words:\n tag_word = word.split(\"_\")\n if tag_word[0] == \"\": pass\n else:\n words_tmp.append(tag_word[0])\n tags_tmp.append(tag_word[1])\n if not tag_word[1] in dict_tags.keys(): \n dict_tags[tag_word[1]] = {}\n dict_tags[tag_word[1]]['right'] = 0\n dict_tags[tag_word[1]]['pred'] = 0\n dict_tags[tag_word[1]]['pres'] = 1\n else: dict_tags[tag_word[1]]['pres'] += 1\n sentences.append(words_tmp)\n tags.append(tags_tmp)\n return sentences, tags, dict_tags", "def get_wordnet_pos(self, word):\n # token = word_tokenize(word)\n base_tag = pos_tag([word])[0][1][:2]\n return self.pos_tag_dict.get(base_tag, wordnet.NOUN)", "def ids_to_tree(self, emb, postprocess=True):\n\n tree = TreeData()\n tokens = self.ids_to_strings(emb)\n\n for token in tokens:\n if token in ['<GO>', '<STOP>', '<VOID>']:\n continue\n if postprocess:\n # casing (only if set to lowercase)\n if self.lowercase and len(tree) == 1 or tree.nodes[-1].t_lemma in ['.', '?', '!']:\n token = token[0].upper() + token[1:]\n # plural merging (if plural tokens come up)\n if token == '<-s>' and tree.nodes[-1].t_lemma is not None:\n token = self._singular_to_plural(tree.nodes[-1].t_lemma)\n tree.remove_node(len(tree) - 1)\n elif token == '<-s>':\n continue\n\n tree.create_child(0, len(tree), NodeData(token, 'x'))\n\n return tree", "def format_for_nltk(labels, dataset):\n if len(labels) != len(dataset):\n return []\n return [(v, labels[i]) for i,v in enumerate(dataset)]", "def data_cleaner(doc):\n \n sw = stopwords.words('english')\n regex_token = RegexpTokenizer(r\"([a-zA-Z]+(?:’[a-z]+)?)\")\n doc = regex_token.tokenize(doc)\n doc = [word.lower() for word in doc]\n doc = [word for word in doc if word not in sw]\n #print(doc)\n doc = pos_tag(doc)\n doc = [(word[0], get_wordnet_pos(word[1])) for word in doc]\n #print(doc)\n lemmatizer = WordNetLemmatizer() \n doc = [lemmatizer.lemmatize(word[0], word[1]) for word in doc]\n #print(' '.join(doc))\n return ' '.join(doc)", "def convertSVT(svtImgDir, svtXMLFile, outPrefix, objectives):\n\n imgFileList, wordList = svtXML(svtXMLFile)\n lenList, charMat = wordsToChars(wordList)\n outFilenames = makeLabelFiles(objectives, svtImgDir, imgFileList, lenList,\n charMat, outPrefix)\n return outFilenames", "def test_corpus_labeling(self):\n corpusName = \"test\"\n built_corpus_Path = corpus_tools.getDataPath(corpusName)\n filename = built_corpus_Path + \"-GT\"\n reader = LinguoDatasetReader()\n with tempfile.TemporaryDirectory() as temp_dir:\n outpath = temp_dir + 
\"-labeled\"\n corpus_tools.labelCorpus(filename, outpath,\n g_label=0, ug_type=\"WS\")\n original = corpus_tools.load_tokenized_corpus(filename)\n loaded = reader.read(outpath)\n for original_sent, loaded_sent in zip(original, loaded):\n self.assertEqual(loaded_sent.fields[\"g_label\"].label,\n \"ungrammatical\")\n self.assertEqual(loaded_sent.fields[\"ug_type\"].label, \"WS\")\n plain_loaded = [str(token) for\n token in loaded_sent.fields[\"sentence\"].tokens]\n self.assertEqual(plain_loaded, original_sent)", "def load_wordnet():\n mode = None\n # Use whichever version is latest\n mtime_xml = max(os.path.getmtime(f) for f in glob(\"src/xml/*.xml\"))\n mtime_yaml = max(os.path.getmtime(f) for f in glob(\"src/yaml/*.yaml\"))\n if os.path.exists(\"wn.xml\"):\n mtime_wn_xml = os.path.getmtime(\"wn.xml\")\n else:\n mtime_wn_xml = 0\n if os.path.exists(\"wn.pickle\"):\n mtime_pickle = os.path.getmtime(\"wn.pickle\")\n else:\n mtime_pickle = 0\n if mtime_yaml > mtime_xml and mtime_yaml > mtime_wn_xml and mtime_yaml > mtime_pickle:\n print(\"Reading from YAML\")\n wn = wordnet_yaml.load()\n pickle.dump(wn, open(\"wn.pickle\", \"wb\"))\n elif mtime_xml > mtime_wn_xml and mtime_xml > mtime_pickle:\n print(\"Merging and reading XML\")\n wn_merge()\n wn = parse_wordnet(\"wn.xml\")\n pickle.dump(wn, open(\"wn.pickle\", \"wb\"))\n elif mtime_wn_xml > mtime_pickle:\n print(\"Reading XML\")\n wn = parse_wordnet(\"wn.xml\")\n pickle.dump(wn, open(\"wn.pickle\", \"wb\"))\n else:\n wn = pickle.load(open(\"wn.pickle\", \"rb\"))\n return wn", "def get_trees(self, word): # -> list:\r\n raise NotImplementedError", "def get_nouns(lemmas_tags):\r\n nouns = []\r\n for lemma in lemmas_tags:\r\n \"\"\"si la etiqueta es None porque no tiene lemma o es un sustantivo\"\"\"\r\n if lemma[1] == None or lemma[1][0] == 'n':\r\n \"\"\"se agrega solamente el lemma\"\"\"\r\n nouns.append(lemma[0])\r\n return nouns", "def tags_to_labels(tags, tag_indices):\n num_samples = len(tags)\n num_labels = len(tag_indices)\n\n labels = numpy.zeros((num_samples, num_labels), dtype=bool)\n for sample_index, sample_name in enumerate(tags.keys()):\n for tag in tags[sample_name]:\n labels[sample_index, tag_indices[tag]] = 1\n return labels", "def buildTree(self, tree=None):\n if tree is None:\n tree = phylogeny.Tree()\n tree.buildFromString(self.newick, False) # tree descriptions from NCL 2.1 are 1-based not 0-based\n return tree", "def binarize(tree):\n if isinstance(tree, str):\n return Tree('0',[tree])\n elif len(tree) == 1:\n# print(tree)\n# print('\\n')\n return binarize(tree[0])\n else:\n label = tree.label()\n# print(type(label))\n return reduce(lambda x, y: Tree(label, (binarize(x), binarize(y))), tree)", "def test_text_classifier_tsne_post(self):\n pass", "def ids_to_tree(self, emb, postprocess=True):\n\n tree = TreeData()\n tokens = self.ids_to_strings(emb)\n\n for token in tokens:\n if token in ['<GO>', '<STOP>', '<VOID>']:\n continue\n tree.create_child(0, len(tree), NodeData(token, 'x'))\n\n return tree" ]
[ "0.6572314", "0.65697056", "0.65697056", "0.64920396", "0.6471394", "0.64376074", "0.64255834", "0.64045995", "0.6399852", "0.6393718", "0.6372048", "0.6361967", "0.63461703", "0.6333776", "0.6136483", "0.60728174", "0.60711044", "0.6015081", "0.60002613", "0.58672816", "0.57813233", "0.5754627", "0.5743112", "0.572407", "0.5698222", "0.5647807", "0.5588081", "0.557537", "0.55677843", "0.55483574", "0.5531726", "0.54523504", "0.5436855", "0.5383038", "0.5338179", "0.5328024", "0.52954155", "0.5286195", "0.52780604", "0.5277641", "0.5268615", "0.52403873", "0.5238756", "0.52283525", "0.5227159", "0.52258956", "0.52111423", "0.52111137", "0.52042353", "0.5203123", "0.52008986", "0.51990545", "0.5194286", "0.51939917", "0.51939917", "0.51939917", "0.51939917", "0.51939917", "0.51939917", "0.51908946", "0.51657915", "0.5138851", "0.51374686", "0.5136704", "0.51364636", "0.5126968", "0.5119719", "0.51136976", "0.5111297", "0.51103747", "0.51102173", "0.51024616", "0.5094172", "0.5090154", "0.50884545", "0.5078407", "0.50745386", "0.50735646", "0.50688064", "0.50645214", "0.5059758", "0.50560236", "0.5032597", "0.50300074", "0.50162035", "0.5014623", "0.5012134", "0.5010546", "0.49968368", "0.4991651", "0.49862093", "0.49770096", "0.4969152", "0.49676841", "0.4943103", "0.4936766", "0.49305138", "0.49275365", "0.49254936", "0.48976222" ]
0.5299345
36
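The negatives in the row above repeatedly show the same Penn Treebank-to-WordNet tag mapping that is applied before lemmatization. As a reading aid only, here is a minimal, self-contained sketch of that pattern; it assumes NLTK is installed with the punkt, averaged_perceptron_tagger and wordnet data downloaded, and the example word and expected output are illustrative, not taken from the dataset rows.

import nltk
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer

def get_wordnet_pos(word):
    # Map the first letter of the Penn Treebank tag (J/N/V/R) to the
    # matching WordNet constant, defaulting to NOUN for anything else.
    tag = nltk.pos_tag([word])[0][1][0].upper()
    tag_dict = {"J": wordnet.ADJ, "N": wordnet.NOUN,
                "V": wordnet.VERB, "R": wordnet.ADV}
    return tag_dict.get(tag, wordnet.NOUN)

lemmatizer = WordNetLemmatizer()
print(lemmatizer.lemmatize("running", get_wordnet_pos("running")))  # expected: "run"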
Make an API request with the given params and sign it with the secret key.
def request(self, params): params["public_key"] = self.__PUBLIC_KEY post_data = urllib.parse.urlencode(params) hmac_ = hmac.new(self.__SECRET_KEY, post_data.encode("utf-8"), hashlib.sha256).hexdigest() curl = pycurl.Curl() curl.setopt(pycurl.URL, self.__URL) curl.setopt(pycurl.HTTPHEADER, ['HMAC: ' + str(hmac_)]) curl.setopt(pycurl.POST, True) curl.setopt(pycurl.POSTFIELDS, post_data) curl.setopt(pycurl.CONNECTTIMEOUT, 10) curl.setopt(pycurl.TIMEOUT, 5) buf = io.BytesIO() curl.setopt(pycurl.WRITEFUNCTION, buf.write) curl.perform() response = buf.getvalue() # Uncomment to debug raw JSON response # self.__log("< " + response) http_code = curl.getinfo(pycurl.HTTP_CODE) curl.close() result = json.loads(response.decode('utf-8')) if http_code != 200: if result["error"]: # 404 with some valid JSON self.__log("ERROR: HTTP " + str(http_code) + ": " + json.dumps(result["error"])) else: self.__log("ERROR: HTTP " + str(http_code) + ": " + response) else: if result is None: self.__log("ERROR: Unparsable JSON " + response) else: if 'error' in result: # || !$result["result"] self.__log("ERROR: " + json_encode(result["error"])) else: result = result["result"] return result
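For comparison with the pycurl-based document above, the following is a minimal sketch of the same HMAC-SHA256 signing scheme written with the requests library; the endpoint URL, key handling and response shape are assumptions for illustration and are not part of the original client.

import hashlib
import hmac
import urllib.parse

import requests

def signed_post(url, public_key, secret_key, params):
    # Same scheme as above: urlencode the params (including the public key),
    # sign the encoded body with HMAC-SHA256, and send the hex digest in an HMAC header.
    payload = dict(params, public_key=public_key)
    post_data = urllib.parse.urlencode(payload)
    signature = hmac.new(secret_key,            # secret_key must be bytes
                         post_data.encode("utf-8"),
                         hashlib.sha256).hexdigest()
    resp = requests.post(url,
                         data=post_data,
                         headers={"HMAC": signature,
                                  "Content-Type": "application/x-www-form-urlencoded"},
                         timeout=(10, 5))       # connect/read timeouts, roughly mirroring the original
    return resp.json()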
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def http_signed_call(self, api_endpoint, params):\r\n if (not self.secret) or (not self.secret.know_secret()):\r\n self.debug(\"### don't know secret, cannot call %s\" % api_endpoint)\r\n return\r\n\r\n key = self.secret.key\r\n sec = self.secret.secret\r\n\r\n if self.use_tonce():\r\n params[\"tonce\"] = self.get_unique_mirotime()\r\n else:\r\n params[\"nonce\"] = self.get_unique_mirotime()\r\n\r\n post = urlencode(params)\r\n prefix = api_endpoint + chr(0)\r\n # pylint: disable=E1101\r\n sign = hmac.new(base64.b64decode(sec), prefix + post, hashlib.sha512).digest()\r\n\r\n headers = {\r\n 'Rest-Key': key,\r\n 'Rest-Sign': base64.b64encode(sign)\r\n }\r\n\r\n use_ssl = self.config.get_bool(\"gox\", \"use_ssl\")\r\n proto = {True: \"https\", False: \"http\"}[use_ssl]\r\n url = \"%s://%s/api/2/%s\" % (\r\n proto,\r\n HTTP_HOST,\r\n api_endpoint\r\n )\r\n self.debug(\"### (%s) calling %s\" % (proto, url))\r\n return json.loads(http_request(url, post, headers))", "def _request(self, account, method, params, key):\n params_bytes = py23_bytes(json.dumps(params), self.ENCODING)\n params_enc = base64.b64encode(params_bytes).decode(self.ENCODING)\n timestamp = datetime.utcnow().strftime(self.TIMEFORMAT)[:-3] + \"Z\"\n nonce_int = random.getrandbits(64)\n nonce_bytes = struct.pack('>Q', nonce_int) # 64bit ULL, big endian\n nonce_str = \"%016x\" % (nonce_int)\n\n message = self.prehash_message(timestamp, account, method,\n params_enc, nonce_bytes)\n signature = sign_message(message, key)\n signature_hex = hexlify(signature).decode(self.ENCODING)\n\n request = {\n \"jsonrpc\": \"2.0\",\n \"id\": self.id,\n \"method\": method,\n \"params\": {\n \"__signed\": {\n \"account\": account,\n \"nonce\": nonce_str,\n \"params\": params_enc,\n \"signatures\": [signature_hex],\n \"timestamp\": timestamp\n }\n }\n }\n r = requests.post(self.url, data=json.dumps(request))\n self.id += 1\n return r.json()", "def sign_request(self, request, api_call, params):\n for key, value in params.items():\n params[key] = value.encode('utf-8')\n\n # Do not POST authentication parameters. 
Use them to create an\n # authentication header instead.\n access_token = params.pop('access_token', None)\n client_id = params.pop('client_id', None)\n client_secret = params.pop('client_secret', None)\n\n # create the authorization header\n if access_token:\n request.add_header(\"Authorization\", \"OAuth {}\".format(access_token))\n else:\n timestamp = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\n data = \"{}\\n{}\\n\".format(api_call, timestamp)\n if params:\n kv_str = [\"{}={}\".format(k, v) for k, v in params.iteritems()]\n kv_str.sort()\n data = data + \"\\n\".join(kv_str) + \"\\n\"\n sha1_str = hmac.new(client_secret, data, sha1).digest()\n hash_str = b64encode(sha1_str)\n request.add_header(\"Date\", timestamp)\n request.add_header(\"Authorization\",\n \"Signature {}:{}\".format(client_id, hash_str))", "def send_signed_call(self, api_endpoint, params, reqid):\r\n if (not self.secret) or (not self.secret.know_secret()):\r\n self.debug(\"### don't know secret, cannot call %s\" % api_endpoint)\r\n return\r\n\r\n key = self.secret.key\r\n sec = self.secret.secret\r\n\r\n call = {\r\n \"id\" : reqid,\r\n \"call\" : api_endpoint,\r\n \"params\" : params,\r\n \"currency\" : self.curr_quote,\r\n \"item\" : self.curr_base\r\n }\r\n if self.use_tonce():\r\n call[\"tonce\"] = self.get_unique_mirotime()\r\n else:\r\n call[\"nonce\"] = self.get_unique_mirotime()\r\n call = json.dumps(call)\r\n\r\n # pylint: disable=E1101\r\n sign = hmac.new(base64.b64decode(sec), call, hashlib.sha512).digest()\r\n signedcall = key.replace(\"-\", \"\").decode(\"hex\") + sign + call\r\n\r\n self.debug(\"### (socket) calling %s\" % api_endpoint)\r\n self.send(json.dumps({\r\n \"op\" : \"call\",\r\n \"call\" : base64.b64encode(signedcall),\r\n \"id\" : reqid,\r\n \"context\" : \"mtgox.com\"\r\n }))", "def request(self, methods, params, format='json'):\n params['api_key'] = self.api_key\n params['expire'] = int(time.time()) + 600 # Grant this request 10 minutes.\n params['format'] = format\n if 'sig' in params: del params['sig']\n params['sig'] = self.hash_args(params)\n\n request_url = '/'.join([self.ENDPOINT, str(self.VERSION)] + methods) + '/?' 
+ self.unicode_urlencode(params)\n #print request_url\n request = urllib.urlopen(request_url)\n data = request.read()\n\n return json.loads(data)", "def sign(self, params: Dict[str, Any]) -> str:\n\n assert self.secret is not None, \"A client secret is required to sign requests.\"\n\n query = urlencode(params)\n signature = hmac.new(self.secret.encode(), query.encode(), hashlib.sha512)\n\n return signature.hexdigest()", "def make_request(self, url, base_uri=None, params=None, auth=REQUIRED, method=\"GET\", silo=False, **kwargs):\n\n params = params or dict()\n body = kwargs.get('body', '')\n headers = {'User-Agent': 'python-photobucket/0.2 (Language=Python)', 'Content-type':'application/x-www-form-urlencoded'}\n headers.update(kwargs.get('extra_headers', {}))\n # Unless explicitly provided, set the default response format to json.\n params.setdefault('format', 'json')\n if 'id' in params:\n params['id'] = self.clean_identifier(params['id'])\n # Remove all params with a value of \"None\"\n params = remove_empty(params)\n\n # Begin auth stuff...\n token = None\n consumer = OAuthConsumer(key=self.key, secret=self.secret)\n if auth in (REQUIRED, OPTIONAL):\n # Setup the oauth token\n try:\n token = Token(key=self.token, secret=self.token_secret)\n except ValueError, e:\n if auth == REQUIRED:\n # Only raise the exception if auth is required.\n raise PhotobucketAPIError(\"Token and Token secret must be set.\")\n\n # Give priority to base_uri since its a quick override of class.URI\n req_uri = \"%s%s\" % (base_uri or self.URI, url)\n\n if silo:\n # This request has to be sent to a specific \"silo\" or \"subdomain\".\n uri = \"http://%s%s\" % (self.subdomain, req_uri)\n # Don't allow redirects if this is to be sent to a specific silo.\n # For in photobucket's own words..\n # \"Photobucket ultimately prefers that you use the information given, rather than relying on the redirects\"\n allow_redirects = False\n else:\n uri = \"http://%s%s\" % (self.DOMAIN, req_uri)\n allow_redirects = True\n req = OAuthRequest.from_consumer_and_token(consumer, token, method, uri, parameters=params, body=body)\n\n # Make sure to ALWAYS pass the main domain to the signature instead of the actual url to be requested.\n req.normalized_url = \"http://%s%s\" % (self.DOMAIN, req_uri)\n req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)\n\n try:\n # I do this to take advantage of the already defined requests and their default values.\n response = getattr(requests, method.lower())(req.to_url(), headers=headers, allow_redirects=allow_redirects)\n response.raise_for_status(allow_redirects=allow_redirects)\n except AttributeError:\n raise PhotobucketAPIError('Invalid Http method')\n except HTTPError, e:\n # This whole handling is still in Beta. \n # Because I'm still deciding on whether to keep it \n # or use \"safe_mode\" for all \"POST\" requests. 
To take advantage of Photobucket's redirect.\n # Suggestions are more than welcome...\n if e.response.status_code == REDIRECT:\n # Need to catch a redirect error because that means that user sent a request\n # without a \"silo\" so it needs to be stored.\n content = self.parse_response(e.response.content, params['format'])\n # Not too sure about this...\n self.subdomain = content['content']['subdomain'].split('//')[1]\n return self.make_request(url, base_uri, params, auth, method, silo, **kwargs)\n error = PhotobucketError(e.message)\n error.response = e.response\n raise error\n return response", "def __signed_GET(self, api_url, params={}, timeout=5):\r\n sign_str = ''\r\n for key in sorted(params.keys()):\r\n _ = '&' + key + '=' + str(params[key])\r\n sign_str += _\r\n payload_str = 'GET' + '&' + api_url + sign_str\r\n signature = hmac.new(bytes(self.secret, encoding='utf-8'), bytes(payload_str, encoding='utf-8'), digestmod=hashlib.sha256).hexdigest()\r\n params['sign'] = signature\r\n url = self.__base_url + api_url\r\n try:\r\n r = requests.get(url, params=params, timeout=timeout)\r\n r.raise_for_status()\r\n except ReadTimeout:\r\n print(\"get timeout\")\r\n return\r\n except requests.exceptions.HTTPError as err:\r\n print(err)\r\n return\r\n if r.status_code == 200:\r\n return r.json()", "def _make_api_call(url, params=None):\n if params is None:\n params = {}\n\n # params['apikey'] = CONFIG.BIOPORTAL_API_KEY\n params['apikey'] = \"8316a8aa-ff8e-4d6e-aa95-faeabfc72d2a\"\n return requests.get(url, params=params)", "def _make_request(self, url: str, parameters: dict = None,\n method: str = 'GET', *args, **kwargs):\n response = requests.request(\n method=method,\n url=build_url(\n self.BASE_API_URL, url, parameters\n ),\n headers={\n 'Authorization': 'Bearer {}'.format(self._access_token)\n }, **kwargs\n )\n if response.ok:\n return response.json()\n raise MondoApiException(response.json()['message'])", "def signed_request(self, method, api_url, **payload):\n\n r_url = self.base_url + api_url\n payload['timestamp'] = self.get_server_time()\n payload['signature'] = self.get_signed(**payload)\n\n headers = {\n 'X-MBX-APIKEY': self.key,\n }\n\n try:\n r = requests.request(method, r_url, headers=headers, params=payload)\n r.raise_for_status()\n except requests.exceptions.HTTPError as err:\n print(err)\n print(err.text)\n sys.exit(1)\n if r.status_code == 200:\n return r.json()", "def _v1_request(self, path, method, params={}):\n url = '{}/{}/{}'.format(\n self.api_server,\n self.V1_API,\n path,\n )\n params.update({\n self.API_KEY: self.private_token\n })\n\n return self._request(method, url, params)", "def enqueue_http_request(self, api_endpoint, params, reqid):\r\n if self.secret and self.secret.know_secret():\r\n self.http_requests.put((api_endpoint, params, reqid))", "def __sign_POST(self, api_url, params, timeout):\r\n sign_str = ''\r\n for key in sorted(params.keys()):\r\n _ = '&' + key + '=' + str(params[key])\r\n sign_str += _\r\n payload_str = 'POST' + '&' + api_url + sign_str\r\n signature = hmac.new(bytes(self.secret, encoding='utf-8'), bytes(payload_str, encoding='utf-8'), digestmod=hashlib.sha256).hexdigest()\r\n params['sign'] = signature\r\n url = self.__base_url + api_url\r\n try:\r\n r = requests.post(url,data=params, timeout=timeout)\r\n r.raise_for_status()\r\n except ReadTimeout:\r\n print(\"post timeout\")\r\n return\r\n except requests.exceptions.HTTPError as err:\r\n print(err)\r\n return\r\n if r.status_code == 200:\r\n return r.json()", "def signed_request(self, 
method, api_url, **payload):\n\n param = ''\n if payload:\n sort_pay = sorted(payload.items())\n # sort_pay.sort()\n for k in sort_pay:\n param += '&' + str(k[0]) + '=' + str(k[1])\n param = param.lstrip('&')\n timestamp = str(int(time.time() * 1000))\n full_url = self.base_url + api_url\n\n sig_str = ''\n if method == 'GET':\n if param:\n full_url = full_url + '?' + param\n sig_str = method + full_url + timestamp\n elif method == 'POST':\n sig_str = method + full_url + timestamp + param\n\n signature = self.get_signed(bytes(sig_str, 'utf-8'))\n\n headers = {\n 'FC-ACCESS-KEY': self.key,\n 'FC-ACCESS-SIGNATURE': signature,\n 'FC-ACCESS-TIMESTAMP': timestamp\n }\n\n try:\n r = requests.request(method, full_url, headers=headers, json=payload)\n r.raise_for_status()\n if r.status_code == 200:\n return r.json()\n else:\n return False, {'error': 'E10000', 'data': r.status_code}\n except requests.exceptions.HTTPError as err:\n return False, {'error': 'E10001', 'data': r.text}\n except Exception as err:\n return False, {'error': 'E10002', 'data': err}", "def signed_request(params):\n has_signature = False\n keys = params.keys()\n if \"signature\" in keys:\n has_signature = True\n keys.remove(\"signature\")\n keys.sort()\n if has_signature:\n keys.append(\"signature\")\n query_string = \"&\".join(quote(key, \"~\") + \"=\" + quote(params[key], \"~\") for key in keys)\n return query_string", "def sign_request(self, host, endpoint, params, headers, method, payload=\"\", time=time):\n\n request_date = time.strftime('%Y%m%dT%H%M%SZ', time.gmtime())\n\n signature = self.calculate_signature(request_date, host, endpoint, params, headers, method, payload, time)\n\n canonical_query = [\n aws_quote(param) + '=' + aws_quote(params[param])\n for param in sorted(params.keys())\n ]\n canonical_query = '&'.join(canonical_query)\n\n return 'http://{host}{endpoint}?{query}&X-Amz-Signature={signature}'.format(\n host=host, endpoint=endpoint, query=canonical_query, signature=aws_quote(signature))", "def build_request(input_id, method, params=None):\n request = {\"method\": method, \"id\": input_id}\n if params is not None:\n request[\"params\"] = params\n return request", "def send(self, params=None):\n\t\tif params:\n\t\t\t# Update URL parameters (optional)\n\t\t\tself.params.update(params)\n\t\tLOGGER.debug(\"API %s request to %s with %s\", self.method_override, self.url, self.json or self.data)\n\t\t# Get a prepared request\n\t\trequest = self.prepare()\n\t\t# Take environment variables into account (especially for proxies...)\n\t\tsettings = self.api.merge_environment_settings(request.url, {}, None, None, None)\n\t\tr = self.api.send(request, **settings)\n\t\treturn self.api.create_response(r)", "def _sign_request(secret, method, url, timestamp, content_hash=None):\n message = f'{timestamp}{url}{method}{content_hash}'\n\n return hmac.new(secret.encode('utf-8'), message.encode('utf-8'), hashlib.sha512).hexdigest()", "def get_result(params):\n global PublicKey, ProjectId, url\n params[\"PublicKey\"] = PublicKey\n str_url = ''\n\n if ProjectId != '':\n params[\"ProjectId\"] = ProjectId\n params[\"Signature\"] = verfy_ac(params)\n\n for key, value in params.items():\n str_url += key + '=' + value + '&'\n\n r = requests.post(url)\n url = url + '/?' 
+ str_url.strip('&')\n\n print(\"http status code:\", r.status_code)\n print(\"your url of api request:\\n\", url)", "def requester(get_args: dict) -> dict:\n get_args.update(dict(apikey = apikey))\n response = requests.get(URL, params = get_args)\n return response.json()", "def connect(method, url, params, stream=False):\n\t# Generate temporaly-used parameters\n\toauth_nonce = gen_oauth_nonce()\n\toauth_timestamp = gen_oauth_timestamp()\n\toauth_params = {\n\t\t'oauth_token' : users[user_name]['oauth_token'] if user_name != '' else oauth_token,\n\t\t'oauth_consumer_key' : oauth_consumer_key,\n\t\t'oauth_signature_method' : oauth_signature_method,\n\t\t'oauth_version' : oauth_version,\n\t\t'oauth_nonce' : oauth_nonce,\n\t\t'oauth_timestamp' : oauth_timestamp,\n\t}\n\toauth_signature = eval('build_signature(\"' + method.upper() + '\", url, oauth_params, params=params)')\n\toauth_params['oauth_signature'] = percent_encode(oauth_signature)\n\tfor key, val in params.items():\n\t\tparams[key] = str(val)\n\trequest = eval('requests.' + method + '(url, params, stream=stream, headers={\"Authorization\": build_oauth_header(oauth_params)})')\n\treturn request", "def _do_call(cls, method, url, params={}):\n headers = {\n 'User-Agent': 'py-retain/' + __version__,\n 'content-type': 'application/json'\n }\n try:\n r = cls.request_map[method.lower()]\n except KeyError:\n raise ValueError(\"Unknow HTTP Method\")\n response = r(\n url,\n auth=(cls.app_id, cls.api_key),\n headers=headers,\n data=json.dumps(params),\n timeout=cls.timeout)\n return response.json()", "def get_request(url, params={}):\n if isinstance(params, dict):\n if len(params) > 0:\n url += \"?\" + urllib.parse.urlencode(params)\n else:\n raise TypeError(\"data must be a dict\")\n headers = {}\n github_token = os.environ.get(\"GITHUB_TOKEN\")\n if github_token:\n headers[\"Authorization\"] = f\"Bearer {github_token}\"\n return urllib.request.Request(url, headers=headers)", "def sign_request(request, token, secret):\n if isinstance(token, unicode):\n token = token.encode(\"ascii\")\n if isinstance(secret, unicode):\n secret = secret.encode(\"ascii\")\n # Use MAC parameters from the request if present.\n # Otherwise generate some fresh ones.\n params = parse_authz_header(request, {})\n if params and params.pop(\"scheme\") != \"MAC\":\n params.clear()\n params[\"id\"] = token\n if \"ts\" not in params:\n params[\"ts\"] = str(int(time.time()))\n if \"nonce\" not in params:\n params[\"nonce\"] = os.urandom(5).encode(\"hex\")\n # Calculate the signature and add it to the parameters.\n params[\"mac\"] = get_mac_signature(request, secret, params)\n # Serialize the parameters back into the authz header.\n # WebOb has logic to do this that's not perfect, but good enough for us.\n request.authorization = (\"MAC\", params)", "def _api_request(self, endpoint, params=None):\n \n if params:\n response = requests.get(url=f\"{self.api_url}/{endpoint}\", headers={\"Authorization\":self.auth_header},\n params=params)\n else:\n response = requests.get(url=f\"{self.api_url}/{endpoint}\", headers={\"Authorization\":self.auth_header})\n code = response.status_code\n if 200 <= code < 300:\n logging.debug(f\"API call: {self.api_url}/{endpoint} | {code}\")\n encoding = response.encoding\n raw = response.content\n return json.loads(raw.decode(encoding))\n elif code > 500:\n raise APIAuthException\n else:\n logging.error(f\"ERROR: Bad API call: {self.api_url}/{endpoint} | {code}\")", "def auth_request_as_hmac(relative_url: str, params: dict=dict(), headers: 
dict=dict(), body: bytes=b'') -> (str, dict):\n import base64\n import time\n import collections\n import urllib.parse\n\n hex_key = bytearray.fromhex(HMAC_SECRET_KEY)\n\n print(\"Authenticating request using HMAC\")\n print(\"Secret key: {}\".format(bytes2hex(hex_key)))\n print()\n\n client_header = 'X-Kaiterra-Client'\n headers[client_header] = CLIENT_ID\n timestamp_header = 'X-Kaiterra-Time'\n headers[timestamp_header] = '{:x}'.format(int(time.time()))\n\n header_component = '{}={}&{}={}'.format(\n client_header, headers[client_header],\n timestamp_header, headers[timestamp_header]).encode('ascii')\n\n # Order doesn't matter\n relative_url_with_params = relative_url\n if params:\n relative_url_with_params += \"?\" + urllib.parse.urlencode(params)\n url_component = relative_url_with_params.encode('ascii')\n\n full_payload = header_component + url_component + body\n print(\"Full payload to be signed:\")\n print(full_payload)\n print()\n\n headers['X-Kaiterra-HMAC'] = base64.b64encode(hmac(hex_key, full_payload))\n\n return (API_BASE_URL.strip(\"/\") + relative_url_with_params, headers)", "def make_request(payload):\r\n\r\n log.info(\"make_request\\n{0}\\n{1}\".format(settings.API_URL,\r\n payload['method']))\r\n\r\n # log.debug(\"make_request\\n{0}\\n{1}\".format(settings.API_URL,\r\n # json.dumps(payload)))\r\n\r\n response = requests.post(settings.API_URL,\r\n data=json.dumps(payload),\r\n headers={\"content-type\": \"application/json\"},\r\n auth=(settings.API_USER, settings.API_PASS))\r\n\r\n #log.debug(response.content)\r\n response_json = response.json()\r\n\r\n if 'error' in response_json:\r\n log.error(response_json['error'])\r\n\r\n return response_json", "def _make_request(self, method, path, **kwargs):\r\n headers = {\r\n 'Content-Type': 'application/json',\r\n 'User-Agent': USER_AGENT,\r\n }\r\n headers.update(kwargs.get('headers', {}))\r\n kwargs['headers'] = headers\r\n kwargs['auth'] = self.auth\r\n\r\n url = '/'.join((self.endpoint, 'v1', self.account_id, path))\r\n resp = requests.request(method, url, **kwargs)\r\n resp.raise_for_status()\r\n return resp", "def generate_payload(\n self,\n method: str,\n url: str,\n params: Dict[str, Any] = None,\n ):\n # Nonce is standard EPOCH timestamp only accurate to 1s\n nonce = str(int(time.time()))\n body = \"\"\n # Need to build the full URL with query string for HS256 sig\n if params is not None and len(params) > 0:\n query_string = \"&\".join([f\"{k}={v}\" for k, v in params.items()])\n if method == \"GET\":\n url = f\"{url}?{query_string}\"\n else:\n body = query_string\n # Concat payload\n payload = f\"{method}{nonce}{url}{body}\"\n # Create HS256 sig\n sig = hmac.new(self.secret_key.encode(), payload.encode(), hashlib.sha256).hexdigest()\n # Base64 encode it with public key and nonce\n return b64encode(f\"{self.api_key}:{nonce}:{sig}\".encode()).decode().strip()", "def send_request(base, params):\n nb_req = 0\n while nb_req < NB_MAX_REQ:\n full_url = base + \"?\" + \"&\".join([\"%s=%s\" % (k, str(v).replace(' ', '+')) for (k, v) in params.items()])\n if len(KEY_LIST) > 0:\n if len(params) > 0:\n full_url += \"&\" + get_key()\n else:\n full_url += get_key()\n\n request = requests.get(full_url).json()\n if not request:\n nb_req += 1\n time.sleep(min(1, nb_req / 10))\n continue\n\n return request", "def request(host, path, url_params, consumer_key, consumer_secret, token, token_secret):\n # Unsigned URL\n encoded_params = ''\n if url_params:\n encoded_params = urllib.urlencode(url_params)\n url = 'http://%s%s?%s' % (host, path, 
encoded_params)\n \n\n # Sign the URL\n consumer = oauth2.Consumer(consumer_key, consumer_secret)\n oauth_request = oauth2.Request('GET', url, {})\n oauth_request.update({'oauth_nonce': oauth2.generate_nonce(),\n 'oauth_timestamp': oauth2.generate_timestamp(),\n 'oauth_token': token,\n 'oauth_consumer_key': consumer_key})\n\n token = oauth2.Token(token, token_secret)\n oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, token)\n signed_url = oauth_request.to_url()\n \n\n # Connect\n try:\n conn = urllib2.urlopen(signed_url, None)\n try:\n response = json.loads(conn.read())\n finally:\n conn.close()\n except urllib2.HTTPError, error:\n response = json.loads(error.read())\n\n return response", "def api( self, method, argc, **kwargs ):\n url = self.btce_url + argc + '/'\n body = urllib.urlencode(kwargs)\n sign = self.hash_hmac( body )\n headers = dict( Sign = sign, Uid = self.uid )\n if method == 'POST':\n response = requests.post( url,\n data = body,\n headers = headers,\n )\n elif method == 'GET':\n response = requests.get( url,\n headers = headers,\n )\n return response.text", "def request(host, path, url_params=None):\n url_params = url_params or {}\n url = 'http://{0}{1}?'.format(host, urllib.quote(path.encode('utf8')))\n\n consumer = oauth2.Consumer(CONSUMER_KEY, CONSUMER_SECRET)\n oauth_request = oauth2.Request(method=\"GET\", url=url, parameters=url_params)\n\n oauth_request.update(\n {\n 'oauth_nonce': oauth2.generate_nonce(),\n 'oauth_timestamp': oauth2.generate_timestamp(),\n 'oauth_token': TOKEN,\n 'oauth_consumer_key': CONSUMER_KEY\n }\n )\n token = oauth2.Token(TOKEN, TOKEN_SECRET)\n oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, token)\n signed_url = oauth_request.to_url()\n\n\n conn = urllib2.urlopen(signed_url, None)\n try:\n response = json.loads(conn.read())\n finally:\n conn.close()\n return response", "def _send_in_request(self):\n try:\n req_params = urllib.urlencode(self._params)\n except Exception as ex:\n raise ProxyError('Error signing request string') \n \n try:\n self.logger.debug('Send api request to: %s' % self._api_url)\n self.logger.debug('Request params: %s' % req_params)\n self.logger.debug('Request timeout: %s' % self._timeout)\n if len(self._params) > 0:\n f = urllib2.urlopen(self._api_url, req_params, self._timeout)\n response = f.read()\n self.logger.debug('Response length: %s' % len(response))\n f.close() \n return response\n else:\n return \"{'command':'ping', 'message':'ok'}\" \n except (urllib2.URLError) as ex:\n self._error = json.loads(ex.fp.readline()).values()\n raise ProxyResponseError()\n except (IOError) as ex:\n raise ProxyError(ex)", "def request_with_auth(self, method, *args, **kwargs):\r\n return getattr(self.client, method)(*args, HTTP_X_EDX_API_KEY=TEST_API_KEY, **kwargs)", "async def _make_request(self, url: str, params, server_id: str):\n headers = {\n 'X-Response-Control': 'minified',\n 'User-Agent': 'Friendly Red bot'\n }\n\n if server_id in self.config:\n if 'API_TOKEN' in self.config[server_id]:\n headers['X-Auth-Token'] = self.config['API_TOKEN']\n else:\n await self.bot.say(box('Requests made without an authentication token are limited to 100 requests per 24 hours.\\nYou can request a key by registering at http://api.football-data.org and setting it via [p]football tokenset.'))\n\n async with aiohttp.get(url, headers=headers, params=params) as r:\n if r.status == 200:\n data = await r.json()\n return data\n elif r.status == 400:\n await self.bot.say(box('Bad Request [400]:\\nYour 
request was malformed most likely the value of a Filter was not set according to the Data Type that is expected.'))\n return\n elif r.status == 403:\n await self.bot.say(box('Restricted Resource [403]:\\nYou tried to access a resource that exists, but is not available for you. This can be out of the following reasons:\\n- the resource is only available to authenticated clients\\n- the resource is only available to donating clients\\n- the resource is not available in the API version you are using'))\n return\n elif r.status == 404:\n await self.bot.say(box('Not found [404]\\nYou tried to access a resource that doesn’t exist.'))\n return\n elif r.status == 429:\n await self.bot.say(box('Too many requests [429]\\nYou exceeded your allowed requests per minute/day depending on API version and your user status.\\nSee http://api.football-data.org/docs/v1/index.html#_request_throttling for more information.'))\n await self.bot.say(box('Requests reset in ' + r.headers['X-RequestCounter-Reset'] + ' seconds.'))\n return\n else:\n await self.bot.say(box('Pancake has no idea what you\\'ve done, seriously.'))\n await self.bot.say(box(r.status + '\\n' + r.json()['error']))\n return", "def make_api_call(action, parameters = {}, method = 'get', data = {}):\n headers = {\n 'Content-type': 'application/json',\n 'Accept-Encoding': 'gzip',\n 'Authorization': 'Bearer %s' % ACCESS_TOKEN\n }\n if method == 'get':\n r = s.request(method, API_BASE_URL+action, headers=headers, params=parameters, timeout=30)\n elif method == 'post':\n r = s.request(method, API_BASE_URL+action, headers=headers, data=data, params=parameters, timeout=10)\n else:\n raise ValueError('Method should be get or post.')\n log('API %s call: %s' % (method, r.url) )\n if ((r.status_code == 200 and method == 'get') or (r.status_code == 201 and method == 'post')):\n return r.json()\n else:\n raise ValueError('API error when calling %s : %s' % (r.url, r.content))", "def request(self, verb, address, params=None, data=None):\n return BWUser.bare_request(verb=verb, address_root=self.api_url,\n address_suffix=address,\n access_token=self.token,\n params=params or dict(),\n data=data or dict())", "def request(self, url, data=None, params={}, files=None):\n params['token'] = self.token\n request = self.make_request(url, data=data, params=params, files=files)\n return request", "def _execApiCall(headers, params, method_name,\r\n domain='ma.gnolia.com',\r\n urlhead='/api/rest/1/'):\r\n \r\n if 'api_key' not in params and method_name not in ['echo', 'get_key']:\r\n raise MagnoliaException('Required API Key parameter missing')\r\n conn = httplib.HTTPConnection(domain)\r\n conn.request('POST', urlhead + method_name, params, headers)\r\n return conn.getresponse()", "def _make_request(self, method: str, params: Dict) -> Dict:\n\n # Define a new session.\n request_session = requests.Session()\n request_session.verify = True\n\n # Define a new request.\n request_request = requests.Request(\n method=method.upper(),\n url=self.bea_url,\n params=params\n ).prepare()\n\n # Send the request.\n response: requests.Response = request_session.send(\n request=request_request\n )\n\n # Close the Session\n request_session.close()\n\n print(response.url)\n\n # If the response is OK then return it.\n if response.ok and self._format == 'JSON':\n return response.json()\n elif response.ok and self._format == 'XML':\n return response.text\n else:\n raise requests.ConnectionError()", "def do_request(self, path, method='get', params=None, data=None,\n headers=None, cookies=None, 
auth=None):\n headers = {'Authorization': self.token}\n return super(FGasesRegistry, self).do_request(path,\n method=method,\n params=params,\n data=data,\n headers=headers,\n cookies=cookies,\n auth=auth)", "async def request(\r\n self, method: str, url: str, params: dict = None, data: dict = None\r\n ):\r\n async with self._session.request(\r\n method,\r\n url,\r\n params=params,\r\n json=data,\r\n headers={\"Authorization\": \"Bearer \" + self._token},\r\n ) as resp:\r\n if resp.status == 200:\r\n return await resp.json()\r\n if resp.status in (400, 422, 429, 500):\r\n data = None\r\n try:\r\n data = await resp.json()\r\n except Exception: # pylint: disable=broad-except\r\n pass\r\n raise APIResponseError(\r\n resp.request_info,\r\n resp.history,\r\n status=resp.status,\r\n message=resp.reason,\r\n headers=resp.headers,\r\n data=data,\r\n )\r\n resp.raise_for_status()", "def _api_request(*args, **kwargs):\n response = requests.request(*args, **kwargs)\n return APIResponse(response)", "def send_api_request(self, url, **kwargs):\n\n params = self._params.copy()\n dct = {k: kwargs[k] for k in kwargs if kwargs[k] is not None}\n params.update(dct)\n\n res = requests.get(url, params=params)\n if res.status_code != 200:\n try:\n error = res.json()['error']\n except ValueError:\n error = None\n raise SwrveApiException(error, res.status_code, url, params)\n\n return res.json()", "def request(self, base_uri, access_token=None, method='GET', body=None,\n headers=None, params=None, token_param='oauth_token'):\n\n args = {}\n args.update(params or {})\n if access_token is not None and method == 'GET':\n args[token_param] = access_token\n uri = '%s?%s' % (base_uri, urllib.urlencode(args))\n return self.http.request(uri, method=method, body=body, headers=headers)", "def request(path, params):\n res = requests.post(\n f'https://i.hostker.com/api{path}',\n data=params,\n headers={'Content-Type': 'application/x-www-form-urlencoded'}\n )\n res.encoding = 'utf-8'\n result = res.json()\n if result['success'] == 0:\n raise HostkerRequestError(result['errorMessage'])\n return result", "def requestToken(self):\n #################\n # BEGIN ROUTINE #\n #################\n # clear everything\n self.clear()\n # initialization\n self.request_oauth_nonce = self._generate_nonce()\n self.request_oauth_timestamp = self._generate_timestamp()\n # create Signature Base String\n method = \"POST\"\n url = self.getRequestTokenURL()\n query_dict = {\"oauth_callback\": self.CALLBACK_URL,\n \"oauth_consumer_key\": self.API_KEY,\n \"oauth_nonce\": self.request_oauth_nonce,\n \"oauth_signature_method\": self.signature_method,\n \"oauth_timestamp\": self.request_oauth_timestamp,\n \"oauth_version\": self.version,\n }\n query_string = self._quote(self._urlencode(query_dict))\n signature_base_string = \"&\".join([self._quote(method), self._quote(url), query_string])\n # create actual signature\n hashed = hmac.new(self._quote(self.API_SECRET) + \"&\", signature_base_string, sha)\n signature = binascii.b2a_base64(hashed.digest())[:-1]\n # it is time to create the heaader of the http request that will be sent\n header = 'OAuth realm=\"https://rightsignature.com\", '\n header += 'oauth_nonce=\"%s\", '\n header += 'oauth_callback=\"%s\", '\n header += 'oauth_signature_method=\"%s\", '\n header += 'oauth_timestamp=\"%d\", '\n header += 'oauth_consumer_key=\"%s\", '\n header += 'oauth_signature=\"%s\", '\n header += 'oauth_version=\"%s\"'\n header = header % (self.request_oauth_nonce, self._quote(self.CALLBACK_URL),\n 
self.signature_method, self.request_oauth_timestamp,\n self._quote(self.API_KEY), self._quote(signature), self.version)\n\n\n # next step is to establish an HTTPS connection through the LinkedIn API\n # and fetch the request token.\n connection = httplib.HTTPSConnection(self.API_ENDPOINT)\n connection.request(method, self.REQUEST_TOKEN_URL, body = self._urlencode(query_dict), headers = {'Authorization': header})\n response = connection.getresponse()\n if response is None:\n self.request_oauth_error = \"No HTTP response received.\"\n connection.close()\n return False\n\n response = response.read()\n connection.close()\n\n oauth_problem = self._get_value_from_raw_qs(\"oauth_problem\", response)\n if oauth_problem:\n self.request_oauth_error = oauth_problem\n return False\n\n self.request_token = self._get_value_from_raw_qs(\"oauth_token\", response)\n self.request_token_secret = self._get_value_from_raw_qs(\"oauth_token_secret\", response)\n return True", "def tapi(self,method,argc,**kwargs):\n url = self.btce_trade_url + argc + '/'\n kwargs['nonce'] = str(int(time.time()))\n kwargs['method'] = argc\n body = urllib.urlencode(kwargs)\n sign = self.hash_tapi( body )\n headers = dict( Sign = sign, Key = self.trade_key )\n if method == 'POST':\n response = requests.post( url,\n data = body,\n headers = headers,\n )\n elif method == 'GET':\n response = requests.get( url,\n headers = headers,\n )\n return response.text", "def request(self, method, *params):\n\n payload = {**self.payload, \"id\": self.request_id, \"method\": method}\n if params is not None:\n payload[\"params\"] = (*self.auth_params, *params)\n\n try:\n response = self.session.post(self.endpoint, json=payload).json()\n except json.decoder.JSONDecodeError as e:\n raise Exception(f\"Metaname API didn't return a JSON response: {e}\") from e\n except Exception as e:\n raise Exception(f\"Metaname API call failed: {e}\") from e\n\n if response.get(\"id\", None) != self.request_id:\n raise Exception(\n f\"Metaname API returned out of sequence response: {response}\"\n )\n else:\n self.request_id += 1\n\n if \"result\" in response:\n return response[\"result\"]\n elif \"error\" in response:\n raise Exception(f\"Metaname API error: {response['error']}\")\n else:\n raise Exception(f\"Metaname API returned an invalid response: {response}\")", "def _rpc_request(self, method, params, key):\n payload = {\n \"method\": method,\n \"params\": params,\n \"jsonrpc\": \"2.0\",\n \"id\": 0\n }\n res = requests.post(\n \"http://{}:{}\".format(self.url, self.port),\n data=json.dumps(payload),\n headers=self.headers).json()\n return res[key]", "async def _api_request(self,\n method: str,\n path_url: str,\n params: Dict[str, Any] = {}) -> Dict[str, Any]:\n base_url = f\"https://{global_config_map['gateway_api_host'].value}:\" \\\n f\"{global_config_map['gateway_api_port'].value}\"\n url = f\"{base_url}/{path_url}\"\n client = await self._http_client()\n if method == \"get\":\n if len(params) > 0:\n response = await client.get(url, params=params)\n else:\n response = await client.get(url)\n elif method == \"post\":\n response = await client.post(url, data=params)\n\n parsed_response = json.loads(await response.text())\n if response.status != 200:\n err_msg = \"\"\n if \"error\" in parsed_response:\n err_msg = f\" Message: {parsed_response['error']}\"\n raise IOError(f\"Error fetching data from {url}. 
HTTP status is {response.status}.{err_msg}\")\n if \"error\" in parsed_response:\n raise Exception(f\"Error: {parsed_response['error']}\")\n\n return parsed_response", "def apikey_request(self, method, api_url, **payload):\n r_url = self.base_url + api_url\n\n headers = {\n 'X-MBX-APIKEY': self.key,\n }\n\n try:\n r = requests.request(method, r_url, headers=headers, params=payload)\n r.raise_for_status()\n except requests.exceptions.HTTPError as err:\n print(err)\n print(err.text)\n sys.exit(1)\n if r.status_code == 200:\n return r.json()", "async def _request(self, method_name: str, params: dict) -> dict:\n url = self.api_url + method_name\n\n async with self._session() as sess:\n async with sess.get(url, params=params) as res:\n return await res.json()", "def _request_key(self, key, method='get', params=None, **kwargs):\n uri = \"/{version_prefix}/keys{key}\".format(\n version_prefix=self._version_prefix,\n key=key\n )\n if params:\n uri += \"?\"\n sep = \"\"\n for param, value in sorted(params.items()):\n if isinstance(value, bool):\n value = str(value).lower()\n uri += \"%s%s=%s\" % (sep, param, value)\n sep = \"&\"\n return self._request_call(uri, method=method, **kwargs)", "def _request(self, endpoint: str = \"/api/\", params: object = {}) -> dict:\n ret: dict = {}\n try:\n if not self.api_key:\n ret[\"error\"] = \"API key is empty\"\n raise APIError(ret['error'])\n\n r = requests.get(f\"{self.apibase}{endpoint}\",\n params=params,\n headers=self.headers,\n verify=self.verify_ssl)\n response_data = orjson.loads(r.text)\n except orjson.JSONDecodeError:\n ret[\"error\"] = \"Failed to parse response data to JSON\"\n if self.debug:\n ret[\"error\"] += \"\\nDescription: \" + r.reason\n ret[\"error\"] += \"\\nData: \" + r.text\n except requests.HTTPError:\n ret[\"error\"] = f\"{r.status_code}: {r.reason}\"\n if self.debug:\n ret[\"error\"] += \"\\nDescription: \" + r.reason\n ret[\"error\"] += \"\\nData: \" + r.text\n\n if ret.get('error', None):\n raise APIError(ret['error'])\n check_status_code(request=r, debug=self.debug, ret=ret)\n\n ret = response_data\n return ret", "def make_request(self, key):\n params = {'requestedKey': key}\n res = requests.get(url=self.proxy_url, params=params)\n return res", "def httpapi_request(client, **params) -> 'Response':\n return requests.get(\n _HTTPAPI,\n params={\n 'client': client.name,\n 'clientver': client.version,\n 'protover': 1,\n **params\n })", "def req(method, path, params={'api_key': API_KEY}, body=None):\n if body is not None:\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n }\n r = requests.request(method, URL + path, headers=headers, params=params, data=body, verify=VALIDATE_CERT)\n if r.status_code != (requests.codes['created'] or requests.codes['ok']):\n return_error('Error in API call to Threat Grid service %s - %s' % (path, r.text))\n return r\n\n else:\n r = requests.request(method, URL + path, params=params, verify=VALIDATE_CERT)\n if r.status_code != requests.codes.ok:\n return_error('Error in API call to Threat Grid service %s - %s' % (path, r.text))\n return r", "def __call__(self, request):\r\n # These checkings are necessary because type inconsisntecy of requests library\r\n # See request Github issue #230 https://github.com/kennethreitz/requests/pull/230\r\n if not request.params:\r\n request.params = {}\r\n if not request.data:\r\n request.data = {}\r\n if isinstance(request.params, list):\r\n request.params = dict(request.params)\r\n if isinstance(request.data, list):\r\n request.data = 
dict(request.data)\r\n\r\n # Dictionary to OAuth1 signing params\r\n request.oauth_params = {}\r\n\r\n # Adding OAuth params\r\n request.oauth_params['oauth_consumer_key'] = self.consumer.key\r\n request.oauth_params['oauth_timestamp'] = str(int(time.time()))\r\n request.oauth_params['oauth_nonce'] = str(random.randint(0, 100000000))\r\n request.oauth_params['oauth_version'] = self.OAUTH_VERSION\r\n if self.token:\r\n request.oauth_params['oauth_token'] = self.token.key\r\n if 'oauth_verifier' in request.data:\r\n request.oauth_params['oauth_verifier'] = request.data.pop('oauth_verifier')\r\n request.oauth_params['oauth_signature_method'] = self.signature.name\r\n\r\n # oauth_callback is an special parameter, we remove it out of the body\r\n # If it needs to go in the body, it will be overwritten later, otherwise not\r\n if 'oauth_callback' in request.data:\r\n request.oauth_params['oauth_callback'] = request.data.pop('oauth_callback')\r\n if 'oauth_callback' in request.params:\r\n request.oauth_params['oauth_callback'] = request.params.pop('oauth_callback')\r\n\r\n request.data_and_params = request.oauth_params.copy()\r\n request.oauth_params['oauth_signature'] = self.signature.sign(request, self.consumer, self.token)\r\n request.data_and_params['oauth_signature'] = request.oauth_params['oauth_signature']\r\n\r\n if self.header_auth:\r\n request.headers['Authorization'] = self.authorization_header(request.oauth_params)\r\n elif request.method in (\"GET\", \"DELETE\"):\r\n request.url = self.to_url(request)\r\n elif ('Content-Type' not in request.headers or \\\r\n request.headers['Content-Type'] != 'application/x-www-form-urlencoded') \\\r\n and not isinstance(request.data, basestring):\r\n # You can pass a string as data. See issues #10 and #12\r\n request.url = self.to_url(request)\r\n request.data = {}\r\n else:\r\n request.data = request.data_and_params\r\n\r\n return request", "def request(host, path, api_key, url_params=None):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "async def send_request(self, url: str, params: dict) -> dict:\n if self.session is None:\n # Create a session if one doesn't exist\n await self.create_session()\n\n async with self.session.get(url, params=params, headers=self.headers) as resp:\n # Make sure that the response of the request\n # returns code 200. 
Something wrong happened if it doesn't\n if not (300 > resp.status >= 200):\n # Raise an error if the status code isn't 200\n raise ParsingError(f\"Library error parsing request from API: {str(resp.status)}\")\n\n try:\n # We attempt to return the contents\n # of the request in JSON format\n response = await resp.json()\n if resp.status >= 400:\n # This is a validation error from the API\n # Likely has to do with missing/improper params\n missing_params = list()\n for param in response[\"detail\"]:\n missing_params.append(f\"{param['msg']} - {param['loc'][0]}\")\n raise InvalidParams(f\"Impropert params in given request: {missing_params}\")\n # If that fails, simply return the contents of the request\n # without ant kind of formatting (aka just read it)\n except aiohttp.ClientResponseError:\n raise ParsingError(\"Could not return contents from the request\")\n\n # Return the respose from the request, if any\n return response", "def request(self, resource, access_token, data=None, params=None,\n files=None, http_method=None):\n resource = resource.lstrip('/')\n if not resource.startswith('v'):\n resource = 'v1/' + resource\n url = self.url + resource\n\n req= {\n 'headers': {'Authorization': 'Bearer ' + access_token},\n 'params': params,\n }\n if data is not None:\n if http_method is None:\n http_method = 'post'\n req['data'] = dumps(data)\n req['headers']['Content-Type'] = 'application/json'\n if files:\n if http_method is None:\n http_method = 'post'\n req['files'] = files\n\n resp = getattr(requests, http_method or 'get')(url, **req)\n if resp.status_code == 200:\n return resp.json()\n elif resp.status_code == 204:\n return True\n self._process_error(resp)", "def _request(self, method, endpoint, version=None, data={}, headers={}, json=True, oauth=True):\n\t\tversion_data = '' if version is None else '.v{}'.format(version)\n\t\theaders.setdefault('Accept', 'application/vnd.twitchtv{}+json'.format(version_data))\n\t\tif self.oauth and oauth:\n\t\t\theaders.setdefault(\"Authorization\", \"OAuth {}\".format(self.oauth))\n\t\theaders.setdefault(\"Client-ID\", self.client_id)\n\t\tif endpoint.startswith('https://'):\n\t\t\turl = endpoint\n\t\telse:\n\t\t\turl = urljoin(self.base_url, endpoint)\n\t\tdata_arg = 'json' if method == 'POST' else 'params'\n\t\tresponse = requests.request(method, url, headers=headers, **{data_arg: data})\n\t\tresponse.raise_for_status()\n\t\tif json:\n\t\t\treturn response.json()\n\t\telse:\n\t\t\treturn response", "def request(self, params=None):\n\t\trequest = APIRequest(self.api)\n\t\tfor attr in APIRequest.attrs:\n\t\t\tval = getattr(self, attr)\n\t\t\t# Copy Mappings (e.g. 
headers)\n\t\t\tval = dict(val) if isinstance(val, collections.abc.Mapping) else val\n\t\t\tsetattr(request, attr, val)\n\t\t# Update GET parameters\n\t\tif params:\n\t\t\trequest.params.update(params)\n\t\treturn request", "def request(host=API_HOST, path=SEARCH_PATH, api_key=API_KEY, url_params=params):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def _request(cls, method, url, request=None, keep_trying=False, *args,\n **kwargs):\n\n stream = kwargs.pop('stream', False)\n prepared_request = requests.Request(\n method, url, *args, **kwargs).prepare()\n\n # prepared_request.headers[\"Content-Type\"] = \"application/json\"\n if request and request.alice_id:\n prepared_request.headers[\"Cookie\"] = \"sessionid={}\".format(\n request.alice_id\n )\n\n url = urlsplit(url)\n path = bytes(url.path, \"utf-8\")\n if url.query:\n path += bytes(\"?{}\".format(url.query), \"utf-8\")\n salt = bytes(settings.UI_SECRET, \"utf-8\")\n\n body = prepared_request.body or b\"\"\n if isinstance(body, str):\n body = bytes(body, \"utf-8\")\n\n signature = sha256(path + body + salt).hexdigest()\n prepared_request.headers[\"X-Signature\"] = signature\n\n response = cls.send_request(prepared_request, keep_trying, stream=stream)\n\n if response.status_code > 299:\n logger.error(\"Rabbit error: {} - {}\".format(\n response.status_code,\n response.content\n ))\n\n if response.status_code == 403:\n raise RabbitException(\n \"\"\" Data server access is failing for {} requests to {}\n with error {}. request: {}. alice_id {}\n \"\"\".format(\n method,\n str(path, \"utf-8\"),\n response.content,\n request,\n request.alice_id if request else None,\n )\n )\n\n return response", "def _v2_request(self, path, method, data={}):\n\n url = '{}/{}/{}'.format(\n self.api_server,\n self.V2_API,\n path,\n )\n data.update({\n self.API_KEY: self.private_token\n })\n data = json.dumps(data)\n\n return self._request(method, url, data=data)", "def request_idkey(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/idkey\", {}, \"idkey\")\r\n else:\r\n self.send_signed_call(\"private/idkey\", {}, \"idkey\")", "def _get_request(url_root,api_key,path,response_type,params, ssl_verify):\n url = _url_builder(url_root,api_key,path,params)\n content = _fetch(url, ssl_verify)\n response = _dispatch(response_type)(content)\n return response", "def rpc_request(method, params = [], key = None):\n payload = {\n \"method\": method,\n \"params\": params,\n \"jsonrpc\": \"2.0\",\n \"id\": 0\n }\n\n res = requests.post(\n URL,\n data=json.dumps(payload),\n headers={\"content-type\": \"application/json\"}).json()\n\n if not res.get('result'):\n raise RuntimeError(res)\n\n return res['result'][key] if key else res['result']", "def make_request(\n self, endpoint: str, payload: Optional[Any] = None, **kwargs: Any\n ) -> dict:\n\n url = f\"{self.PAPRIKA_BASE_URL}{endpoint}\"\n if payload is None:\n payload = {}\n if kwargs:\n payload.update(kwargs)\n return self.session.get(url, params=payload).json()", "def _get_aws_auth_param(self, params, secret_key, path='/'):\n keys = params.keys()\n keys.sort()\n pairs = []\n for key in keys:\n pairs.append(urllib.quote(key, safe='') + '=' +\n urllib.quote(params[key], safe='-_~'))\n\n qs = '&'.join(pairs)\n string_to_sign = '\\n'.join(('GET', self.host, path, qs))\n\n b64_hmac = 
base64.b64encode(\n hmac.new(secret_key, string_to_sign, digestmod=sha256).digest()\n )\n return b64_hmac", "def request(host, path, url_params=None):\n url_params = url_params or {}\n url = 'http://{0}{1}?'.format(host, path)\n\n consumer = oauth2.Consumer(CONSUMER_KEY, CONSUMER_SECRET)\n oauth_request = oauth2.Request(method=\"GET\", url=url, parameters=url_params)\n\n oauth_request.update(\n {\n 'oauth_nonce': oauth2.generate_nonce(),\n 'oauth_timestamp': oauth2.generate_timestamp(),\n 'oauth_token': TOKEN,\n 'oauth_consumer_key': CONSUMER_KEY\n }\n )\n token = oauth2.Token(TOKEN, TOKEN_SECRET)\n oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, token)\n signed_url = oauth_request.to_url()\n \n print 'Querying {0} ...'.format(url)\n\n conn = urllib2.urlopen(signed_url, None)\n try:\n response = json.loads(conn.read())\n finally:\n conn.close()\n\n return response", "def create_key(\n request: PreparedRequest,\n ignored_params: Iterable[str] = None,\n include_get_headers: bool = False,\n **kwargs,\n) -> str:\n key = hashlib.sha256()\n key.update(encode((request.method or '').upper()))\n url = remove_ignored_url_params(request, ignored_params)\n url = url_normalize(url)\n key.update(encode(url))\n key.update(encode(kwargs.get('verify', True)))\n\n body = remove_ignored_body_params(request, ignored_params)\n if body:\n key.update(body)\n if include_get_headers and request.headers != DEFAULT_HEADERS:\n for name, value in normalize_dict(request.headers).items(): # type: ignore\n key.update(encode(f'{name}={value}'))\n\n return key.hexdigest()", "def make_request(url, params, auth=None, data=None, contentType=None):\n #print 'make_request'\n\n # Import Gevent and monkey patch\n #import gevent\n from gevent import monkey\n monkey.patch_all()\n\n # Import IO Libraries\n import urllib\n import urllib2\n\n if params:\n url = url + '?' 
+ urllib.urlencode(params)\n\n #print url\n #print data\n #print auth\n #print contentType\n\n req = urllib2.Request(url, data=data)\n\n if auth:\n req.add_header('AUTHORIZATION', 'Basic ' + auth)\n\n if contentType:\n req.add_header('Content-type', contentType)\n else:\n if data:\n req.add_header('Content-type', 'text/xml')\n\n\n return urllib2.urlopen(req)", "def make_request(self, url, method, params=None, payload=None, headers=None):\n self.log('Request URL: %s' % url)\n self.log('Method: %s' % method)\n if params:\n self.log('Params: %s' % params)\n if payload:\n self.log('Payload: %s' % payload)\n if headers:\n self.log('Headers: %s' % headers)\n\n if method == 'get':\n req = self.http_session.get(url, params=params, headers=headers)\n elif method == 'put':\n req = self.http_session.put(url, params=params, data=payload, headers=headers)\n else: # post\n req = self.http_session.post(url, params=params, data=payload, headers=headers)\n self.log('Response code: %s' % req.status_code)\n self.log('Response: %s' % req.content)\n\n return self.parse_response(req.content)", "def perform_request(endpoint, token) -> dict:\n return requests.get(endpoint, headers={\"Authorization\": \"Bearer \"+token[\"access_token\"]}).json()", "def api_request(method, url, **kwargs):\n if not settings.BLOCKSTORE_API_AUTH_TOKEN:\n raise ImproperlyConfigured(\"Cannot use Blockstore unless BLOCKSTORE_API_AUTH_TOKEN is set.\")\n kwargs.setdefault('headers', {})['Authorization'] = f\"Token {settings.BLOCKSTORE_API_AUTH_TOKEN}\"\n response = requests.request(method, url, **kwargs)\n if response.status_code == 404:\n raise NotFound\n response.raise_for_status()\n if response.status_code == 204:\n return None # No content\n return response.json()", "def make_request(self, url, action, data='', status_code='', parser=None):\n self._url = self.get_api_path(url)\n headers = {\n 'Content-Type': \"application/json\",\n 'Token': self.token,\n\n }\n kwargs = {}\n if headers:\n kwargs.update(headers=headers)\n if data:\n kwargs.update(data=json.dumps(data))\n\n return getattr(self.http, action.lower())(self._url, **kwargs)", "def rpc_request(method, params, url=LOCAL):\n client = HTTPClient(url)\n return client.request(method, params)", "def make_request(url, params):\n response = requests.get(url, params=params)\n return json.loads(response.text)", "def _post_request(url, params):\n data = dumps(params).encode(\"utf-8\")\n request = requests.post(url, data=data)\n return request", "def build_signature(method, url, oauth_params, params={}):\n\t# Copy params to prevent modification from original params\n\tall_params = copy.deepcopy(oauth_params)\n\t# Combine OAuth parameters and original parameters\n\tall_params.update(params)\n\t# Sort, stringify, and encode all parameters\n\tkeys = sorted(all_params.keys())\n\tencoded_params = ''\n\tfor key in keys:\n\t\tencoded_params += key+'='+percent_encode(str(all_params[key]))+'&'\n\tencoded_params = encoded_params[:-1]\n\tbase_string = method.upper()+'&'+percent_encode(url)+'&'+percent_encode(encoded_params)\n\t# Request crypt calculation to the server and return caluculated value\n\tcalc_url = 'https://www.ryotosaito.com/shielld/calc_signature.php'\n\toauth_token_secret = users[user_name]['oauth_token_secret'] if user_name in users else ''\n\tparams = {'base_string' : base_string, 'oauth_token_secret' : oauth_token_secret}\n\trequest = requests.post(calc_url, params);\n\treturn request.text", "def __call__(self, **parameters):\n request = self._build_request(**parameters)\n\n return 
self.requestor.request(**request)", "def http_request(endpoint, data, method='POST'):\n url = BASE_API + endpoint\n data['authkey'] = AUTH_KEY\n\n response = requests.request(method, url=url, data=data, timeout=300, verify=VERIFY)\n if response.status_code == 200:\n try:\n return response.json()\n except Exception as e:\n return_error('Response JSON decoding failed due to {}'.format(str(e)))\n\n else:\n return_error('API Returned, {}:{}'.format(response.status_code, response.reason))", "def _request(self, url, **kwargs):\n headers = {'PRIVATE-TOKEN': self.token}\n response = make_request(self.base_url + url, headers=headers, **kwargs)\n logging.info('Requested: {0}'.format(url))\n logging.info('Method: {0}'.format(kwargs.get('method', 'GET')))\n logging.info(response.content)\n return json.loads(response.content)", "def _send_request(client, params):\n # Note: We cannot set SecurityGroup here in that it will cause an error.\n res = client.request_spot_fleet(\n SpotFleetRequestConfig=_get_request_config(params)\n )\n logging.info('Created spot fleet request {}'.format(res[\"SpotFleetRequestId\"]))\n return res['SpotFleetRequestId']", "async def _token_request(self, data: dict) -> dict:\n session = async_get_clientsession(self.hass)\n\n data[\"client_id\"] = self.client_id\n\n if self.client_secret is not None:\n data[\"client_secret\"] = self.client_secret\n\n headers = {\n \"Authorization\": BasicAuth(self.client_id,\n self.client_secret).encode(),\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n }\n\n resp = await session.post(self.token_url,\n headers=headers,\n data=data)\n resp.raise_for_status()\n return cast(dict, await resp.json())", "def get_request_with_parameters():\r\n param_data = {'account': '张xx', 'password': 'this is a password'}\r\n response = requests.get(base_url + '/get', params=param_data)\r\n print(response)\r\n print(response.url)\r\n print(response.status_code)", "def api_call(self, method, host, params):\n session_id = self.rpc_login(host)\n params.insert(0, session_id)\n json_rpc_request = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params,\n 'id': self.ID\n }\n\n self.ID += 1\n response = requests.post(host, data=json.dumps(json_rpc_request), headers=self.headers)\n\n return response", "def generate_request(params):\n \n headers = {'Bmx-Token': myToken}\n \n\n url_udis=\"https://www.banxico.org.mx/SieAPIRest/service/v1/series/{series}/datos/{fecha_i}/{fecha_f}\".format(\n fecha_i=params['fecha_ini'],\n fecha_f=params.get(\"fecha_fin\"),\n series=params.get(\"serie\")\n )\n \n \n response = requests.get(url_udis, headers=headers) \n \n if response.status_code == 200:\n return response.json()", "def make_request(self, request_type: RequestTypes, payload: dict, url_extras: [str] = []) -> json:\n s = requests.Session()\n s.headers.update({\n \"Authorization\": \"Bearer %s\" % self.access_token,\n \"Content-Type\": \"application/json\"\n })\n url = self.url_base + self.get_url_end_string(url_extras)\n #print(url)\n if request_type == RequestTypes.POST:\n response = s.post(url, json=payload)\n elif request_type == RequestTypes.GET:\n response = s.get(url, json=payload)\n else:\n print(\"ERROR: '\" + request_type + \"' is not a valid request type\")\n exit(1)\n response_json = response.json()\n self.validate_response(response_json)\n return response_json", "def __call__(self, r):\n # modify and return the request\n nonce = ExchBitmexRestApiConnector.generate_nonce()\n r.headers['api-nonce'] = str(nonce)\n r.headers['api-key'] = self.apiKey\n 
r.headers['api-signature'] = ExchBitmexRestApiConnector.generate_signature(\n self.apiSecret, r.method, r.url, nonce, r.body or '')\n return r", "def _make_request(self, action, params=None):\r\n ct = 'application/x-www-form-urlencoded; charset=UTF-8'\r\n headers = {'Content-Type': ct}\r\n params = params or {}\r\n params['Action'] = action\r\n\r\n for k, v in params.items():\r\n if isinstance(v, unicode): # UTF-8 encode only if it's Unicode\r\n params[k] = v.encode('utf-8')\r\n\r\n response = super(SESConnection, self).make_request(\r\n 'POST',\r\n '/',\r\n headers=headers,\r\n data=urllib.urlencode(params)\r\n )\r\n body = response.read()\r\n if response.status == 200:\r\n list_markers = ('VerifiedEmailAddresses', 'SendDataPoints')\r\n e = boto.jsonresponse.Element(list_marker=list_markers)\r\n h = boto.jsonresponse.XmlHandler(e, None)\r\n h.parse(body)\r\n return e\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)", "def _api_call(self, method: str, endpoint: str, data: dict = None) -> requests.Response:\n\n headers = {\n \"Content-Type\": \"text/plain; charset=uft-8\"\n }\n\n auth = {\n \"usr\": self.user,\n \"key\": self.api_key\n }\n\n payload = {\"auth\": auth, \"data\": data}\n\n response = requests.request(method=method,\n url=self.api_url + endpoint,\n headers=headers,\n data=json.dumps(payload))\n return response", "def post(self, **kwargs):\n post_args = deepcopy(kwargs)\n post_args['apikey'] = self.apikey\n post_args['version'] = self.version\n post_args['timestamp'] = timestamp()\n\n # calculate a checksum based on the values and secret key\n post_checksum = self.checksum(**post_args)\n\n # use urllib to post the values\n post_args['checksum'] = post_checksum\n\n params = urlencode(post_args)\n try:\n if Monitis.debug is True:\n print \"Request URL: \" + self.url\n print \"Request params: \" + str(post_args)\n result = urlopen(self.url, params)\n except HTTPError, error:\n raise MonitisError('API Error: ' + error.read())\n ret = result.read()\n if Monitis.debug is True:\n print \"Response: \" + ret\n result.close()\n return ret", "def _request(url, data=None):\n if data:\n req = urllib.request.Request(\n url,\n json.dumps(data).encode(\"utf-8\"),\n {\n \"X-Starfighter-Authorization\": _KEY,\n \"accept-encoding\": \"gzip\",\n \"content-type\": \"application/json\"\n }\n )\n else:\n req = urllib.request.Request(url)\n return req" ]
[ "0.7493004", "0.69957143", "0.6800038", "0.6760122", "0.67544", "0.672769", "0.67007923", "0.6548487", "0.6517478", "0.649527", "0.63868725", "0.6383492", "0.6381418", "0.6370356", "0.63326025", "0.6278101", "0.62663436", "0.62406445", "0.6220862", "0.6220461", "0.6168506", "0.61508286", "0.61167955", "0.60327715", "0.6011691", "0.59974223", "0.5995575", "0.59676784", "0.5955261", "0.5951967", "0.5950834", "0.5939831", "0.5937032", "0.5932906", "0.5911022", "0.59025115", "0.5901411", "0.58962876", "0.58869916", "0.5886397", "0.5886269", "0.5885009", "0.58704966", "0.5864671", "0.58625716", "0.5858605", "0.5852603", "0.5841137", "0.5837955", "0.583165", "0.5827849", "0.5826934", "0.58062655", "0.5788144", "0.57705754", "0.5755353", "0.57267624", "0.5726549", "0.5721067", "0.5717326", "0.5708855", "0.5699145", "0.5692842", "0.56886846", "0.5668352", "0.56626123", "0.56487304", "0.56468916", "0.564239", "0.5633541", "0.563142", "0.56307614", "0.5626077", "0.56136924", "0.56075305", "0.5603093", "0.5598043", "0.55943906", "0.55941", "0.55911446", "0.5575297", "0.5573489", "0.5572059", "0.55619514", "0.5559843", "0.5551856", "0.5550591", "0.5537465", "0.55338883", "0.5530694", "0.5528346", "0.5527449", "0.5523781", "0.55236924", "0.55233353", "0.5514964", "0.5499583", "0.5496099", "0.5492378", "0.54809266" ]
0.69033766
2
Monitor stats for all the rigs
def getCurrentStats(self):
    params = {
        'method': 'getCurrentStats'
    }
    stats = self.request(params)
    if 'error' in stats:
        return False
    return stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitor(self, rms):\n pass", "def monitor(self):", "def _calc_stats(self):\n\n for res in self.rsts:\n _LOG.info(\"Calculate statistics for '%s'\", res.reportid)\n res.calc_stats(regexs=self._stats_colnames, funcnames=self._stats_funcs)", "def refresh_stats(self) -> None:\n try:\n self._mallctl(\"epoch\", read=False, write=1)\n except Exception as e:\n logger.warning(\"Failed to reload jemalloc stats: %s\", e)", "def statsWorker():\n logger.info('STATS: Starting. Will report out every {0:.1g} hours'.format(\n config.STATS_HOURS))\n while True:\n gevent.sleep(timedelta(hours=config.STATS_HOURS).total_seconds())\n logger.info('STATS: {0}'.format(stats))\n stats.resetStats()\n\n return", "def stats(self):", "def stats(containerids, stream):\n click.echo('*** MONITORING IS INITIATED')\n if(stream):\n while True:\n for x in containerids:\n index = slice(12)\n monitoring(containerid=x[index])\n else:\n for x in containerids:\n index = slice(12)\n monitoring(containerid=x[index])", "def log_stats(self):\n while True:\n for stats in self.stats.values():\n stats.log_stats()\n\n yield from asyncio.sleep(stats_delay)", "def stats(self):\n pass", "def get_host_stats(self, refresh=False):", "def compute_statistics(self):", "def monitor(self, s):\n raise NotImplementedError()", "def log_stats_thread(root):\n start = time.time()\n\n while True:\n time.sleep(3)\n timenow = time.time()\n elapsed = str(timedelta(seconds = timenow - start))\n inodesps = inodecount[root] / (timenow - start)\n logger.info('CRAWL STATS (path {0}, files {1}, dirs {2}, elapsed {3}, perf {4:.3f} inodes/s, {5} paths still scanning {6}, memory usage {7})'.format(\n root, filecount[root], dircount[root], elapsed, inodesps, len(scan_paths), scan_paths, get_mem_usage()))\n dps = total_doc_count[root] / (timenow - start)\n logger.info('ES UPLOAD STATS (path {0}, uploaded {1} docs, elapsed {2}, perf {3:.3f} docs/s)'.format(\n root, total_doc_count[root], elapsed, dps))", "def run(self):\r\n counter = 0\r\n counter_increment = 1000 # Reporting frequency\r\n\r\n last_time = 0\r\n \r\n if get_param(\"record_queue_state\"):\r\n # Add event to query queue state.\r\n query_interval = 1\r\n report_queue_state = RecordQueueState(self.servers,\r\n self.stats_manager,\r\n query_interval)\r\n self.event_queue.put((query_interval, report_queue_state))\r\n while len(self.stats_manager.completed_jobs) < self.total_jobs:\r\n assert(not self.event_queue.empty())\r\n current_time, event = self.event_queue.get()\r\n \r\n #if current_time >= 3.0 * get_param(\"total_time\") / 4.0:\r\n # set_param(\"relative_weights\", \"1,2\")\r\n #elif current_time >= 1.0 * get_param(\"total_time\") / 2.0:\r\n # set_param(\"relative_weights\", \"1,4\")\r\n\r\n assert(current_time >= last_time)\r\n last_time = current_time\r\n\r\n if current_time > counter:\r\n counter = counter + counter_increment\r\n new_events = event.run(current_time)\r\n if new_events:\r\n for new_event in new_events:\r\n self.event_queue.put(new_event)\r\n \r\n self.stats_manager.output_stats()\r\n \r\n output_params()", "def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do", "def latest_monitoring_statistics(self, **_):\n raise NotImplementedError(\"{} doesn't support statistics.\".format(self.__class__.__name__))", "def scheduledscansobjects():\n pass", "def monitoring_group(ctx):\n pass", "def derive_newrelic_stats(self):\n self.logger.debug(\"Collecting stats for newrelic\")\n self.derive_newrelic_volume()\n self.derive_newrelic_throughput()\n 
self.derive_newrelic_innodb()\n self.derive_newrelic_qcache()\n self.derive_newrelic_slaves()", "def clusterMonitor():\n node = os.environ['DIM_DNS_NODE']\n xml = XMLTaskList.TransformXmlToObjects()\n xml.load('../xml/TaskInventory.xml') # loads the Task Inventory\n xml.load('../xml/HLTD01.xml') # loads the Node List\n xml.load('../xml/HLTD02.xml') # loads the Node List\n xml.load('../xml/HLTD03.xml') # loads the Node List\n xml.load('../xml/HLTD04.xml') # loads the Node List\n xml.load('../xml/HLTD06.xml') # loads the Node List\n xml.load('../xml/HLTD07.xml') # loads the Node List\n xml.load('../xml/HLTD08.xml') # loads the Node List\n xml.load('../xml/HLTD09.xml') # loads the Node List\n xml.load('../xml/HLTD10.xml') # loads the Node List\n xml.load('../xml/HLTD11.xml') # loads the Node List\n xml.load('../xml/HLTE04.xml') # loads the Node List\n xml.load('../xml/HLTE06.xml') # loads the Node List\n xml.load('../xml/'+node.upper()+'.xml') # loads the Node List\n collector = ClusterCollector(xml)\n collector.startx()\n collector.run()", "def runStats(df):\n\tpass", "async def monitor():\n\n for n in range(6):\n await asyncio.sleep(2)\n print(\"monitor status:\", n, await ps.status())", "def start_monitoring(self):\n pass", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def _monitor(self):\n # while CONF.weight == 'bw':\n while True:\n self._send_echo_request()\n self.create_link_delay()\n # self.get_loss()\n self.stats['flow'] = {}\n self.stats['port'] = {}\n for dp in self.datapaths.values():\n self.port_features.setdefault(dp.id, {})\n self.link_loss.setdefault(dp.id,{})\n self._request_stats(dp)\n # refresh data.\n self.capabilities = None\n self.best_paths = None\n hub.sleep(setting.MONITOR_PERIOD)\n self.show_stat()", "def watch(self):", "def run(self):\n lsh, minhashes = self._new_lsh_index()\n total_num_events = len(minhashes)\n for key, minhash in minhashes.items():\n event_id, event_type, index_name = key\n score = self._calculate_score(lsh, minhash, total_num_events)\n self._update_event(event_id, event_type, index_name, score)\n\n return dict(\n index=self._config.index,\n data_type=self._config.data_type,\n num_events_processed=total_num_events\n )", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def run(self):\r\n self.collect_data()", "def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = 
json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "async def monitor():\n global counter\n while True:\n time.sleep(1)\n print(counter, 'reqs/sec')\n counter = 0", "def metrics_group():", "def cbstats_test(self):\n cluster_len = RestConnection(self.master).get_cluster_size()\n if self.command == \"kvstore\":\n self.verify_cluster_stats()\n if self.command != \"key\":\n if \"tapagg\" in self.command and cluster_len == 1:\n self.log.info(\"This command only works with cluster with 2 nodes or more\")\n raise Exception(\"This command does not work with one node cluster\")\n else:\n # tapagg needs replica items to print out results\n if \"tapagg\" in self.command:\n for bucket in self.buckets:\n self.shell.execute_cbworkloadgen(self.couchbase_usrname, \\\n self.couchbase_password, self.num_items, \\\n self.set_get_ratio, bucket.name, \\\n self.item_size, self.command_options)\n self.sleep(5)\n for bucket in self.buckets:\n if \"allocator\" in self.command:\n output, error = self.shell.execute_mcstat(bucket,\"\",\n keyname=self.command, vbid=\"\", enable_ipv6=self.enable_ipv6)\n else:\n output, error = self.shell.execute_cbstats(bucket, self.command)\n self.verify_results(output, error)\n if self.command in [\"allocator\", \"kvtimings\", \"timings\"]:\n self.log.warning(\"We will not verify exact values for this stat\")\n else:\n self._verify_direct_client_stats(bucket, self.command, output)\n else:\n mc_conn = MemcachedClientHelper.direct_client(self.master, self.buckets[0].name, self.timeout)\n bucket_info = RestConnection(self.master).get_bucket(self.buckets[0])\n keys_map = {}\n for i in range(1, self.num_items + 1):\n vb_id = i - len(bucket_info.vbuckets) * int(i // len(bucket_info.vbuckets))\n try:\n mc_conn.set(\"test_docs-%s\" % i, 0, 0, json.dumps('{ \"test\" : \"test\"}').encode(\"ascii\", \"ignore\"), vb_id)\n except Exception:\n continue\n keys_map[\"test_docs-%s\" % i] = vb_id\n count = 0\n for key, vb_id in keys_map.items():\n output, error = self.shell.execute_cbstats(self.buckets[0], self.command, key, vb_id)\n self.verify_results(output, error)\n count += 1\n if self.master.ip.endswith(\"amazonaws.com\") and count == 10:\n self.log.info(\"check only 10 keys in aws \")\n break", "def refresh_stats(self, concurrently=False):\n refresh_supporting_views(self._engine, concurrently=concurrently)", "def monitor(frist_invoke=2):\n sdiskio = psutil.disk_io_counters()\n # sleep some time\n\n value_dic = {\n 'iostats': {\n 'io.disks_read': sdiskio.read_bytes/(1024*1024),\n 'io.disks_write': sdiskio.write_bytes/(1024*1024),\n 'io.disks_read_count': sdiskio.read_count/(1024 * 1024),\n 'io.disks_write_count': sdiskio.write_count/(1024 * 1024),\n 'io.disks_read_time': sdiskio.read_time/1000,\n 'io.disks_write_time': sdiskio.write_time/1000,\n 'io.disks_busy_time': sdiskio.write_time/1000,\n }\n }\n\n return value_dic", "def event_stats(self):\n pass", "def callbackMonitor(dim):\n bstRecord.append(dim[0])\n tstRecord.append(dim[1])\n tskRecord.append(dim[2])\n massRecord.append(mass(dim))\n 
rsfSkinRecord.append(skinBuckle(dim)+1)\n rsfStiffRecord.append(stiffenerBuckle(dim)+1)\n rsfMatRecord.append(matFail(dim)+1.1)\n rsfEulerRecord.append(eulerBuckle(dim)+1.1)\n print(dim, mass(dim))", "def _compute_experiment_statistics(self):\n pass", "def monitor(self, **args):\n if 'nevery' not in args:\n args['nevery'] = 1\n\n if 'nrepeat' not in args:\n args['nrepeat'] = 1\n\n if 'nfreq' not in args:\n args['nfreq'] = 1\n\n if 'name' not in args:\n args['name'] = args['vars'] + '-' + str(np.random.randint(0, 1e8))\n\n self.lmp.command('compute {name} {species} {var}'.format(**args))\n self.lmp.command('fix my{name} {species} ave/time {nevery} {nrepeat} {nfreq} c_{name} file {file}'.format(**args))\n\n setattr(self, 'my{name}'.format(**args), [])\n\n self._monitor.append( ('my{name}'.format(**args), args['file']) )\n\n return getattr(self, 'my{name}'.format(**args))", "def compute_stats(self):\n from vmc.common.oal import osobj\n d = osobj.get_iface_stats()\n d.addCallback(self.update_stats)", "def _scan_scores(self,handle, consumer):\n read_and_call(handle, consumer.scores, start=\"Smith-Waterman\")", "def do_statwt_all():\n log_post(':: Apply statwt to all measurement sets')\n for vis in VIS_FILES:\n log_post('-- {0}'.format(os.path.basename(vis)))\n statwt(vis=vis, fitspw=BASELINE_CHANS)", "def start_stats_runners(self):\n\n def stats_runner_run():\n while(True):\n with u.timeit(\"kfac_update\"):\n self.model.advance_batch()\n self.update_stats()\n\n runner_threads = [threading.Thread(target=stats_runner_run,\n args=(), daemon=True)]\n for t in runner_threads:\n t.start()\n return runner_threads", "def statAggregator(baseFolder):\n\n allStats, overviewStats = getStatsFromSubfolder(baseFolder)\n\n pickle.dump(allStats, open('allStats.p', 'wb'))\n pickle.dump(overviewStats, open('overviewStats.p', 'wb'))\n\n allStats = pickle.load(open('allStats.p', 'rb'))\n overviewStats = pickle.load(open('overviewStats.p', 'rb'))\n\n\n writeStatTables(allStats, overviewStats)", "def _event_monitor_loop(region_name, vpc_id,\n watcher_plugin, health_plugin,\n iterations, sleep_time,\n route_check_time_interval=30):\n q_route_spec = watcher_plugin.get_route_spec_queue()\n q_monitor_ips, q_failed_ips, q_questionable_ips = \\\n health_plugin.get_queues()\n time.sleep(sleep_time) # Wait to allow monitor to report results\n\n current_route_spec = {} # The last route spec we have seen\n all_ips = [] # Cache of IP addresses we currently know about\n\n # Occasionally we want to recheck VPC routes even without other updates.\n # That way, if a route is manually deleted by someone, it will be\n # re-created on its own.\n last_route_check_time = time.time()\n while not CURRENT_STATE._stop_all:\n try:\n # Get the latest messages from the route-spec monitor and the\n # health-check monitor. 
At system start the route-spec queue should\n # immediately have been initialized with a first message.\n failed_ips = utils.read_last_msg_from_queue(q_failed_ips)\n questnbl_ips = utils.read_last_msg_from_queue(q_questionable_ips)\n new_route_spec = utils.read_last_msg_from_queue(q_route_spec)\n\n if failed_ips:\n # Store the failed IPs in the shared state\n CURRENT_STATE.failed_ips = failed_ips\n\n if questnbl_ips:\n # Store the questionable IPs in the shared state\n CURRENT_STATE.questionble_ips = questnbl_ips\n\n if new_route_spec:\n # Store the new route spec in the shared state\n CURRENT_STATE.route_spec = new_route_spec\n current_route_spec = new_route_spec\n # Need to communicate a new set of IPs to the health\n # monitoring thread, in case the list changed. The list of\n # addresses is extracted from the route spec. Pass in the old\n # version of the address list, so that this function can\n # compare to see if there are any changes to the host list.\n all_ips = _update_health_monitor_with_new_ips(new_route_spec,\n all_ips,\n q_monitor_ips)\n\n # Spec or list of failed or questionable IPs changed? Update\n # routes...\n # We pass in the last route spec we have seen, since we are also\n # here in case we only have failed/questionable IPs, but no new\n # route spec. This is also called occasionally on its own, so that\n # we can repair any damaged route tables in VPC.\n now = time.time()\n time_for_regular_recheck = \\\n (now - last_route_check_time) > route_check_time_interval\n\n if new_route_spec or failed_ips or questnbl_ips or \\\n time_for_regular_recheck:\n if not new_route_spec and not (failed_ips or questnbl_ips):\n # Only reason we are here is due to expired timer.\n logging.debug(\"Time for regular route check\")\n\n last_route_check_time = now\n vpc.handle_spec(region_name, vpc_id, current_route_spec,\n failed_ips if failed_ips else [],\n questnbl_ips if questnbl_ips else [])\n\n # If iterations are provided, count down and exit\n if iterations is not None:\n iterations -= 1\n if iterations == 0:\n break\n\n time.sleep(sleep_time)\n except KeyboardInterrupt:\n # Allow exit via keyboard interrupt, useful during development\n return\n except Exception as e:\n # Of course we should never get here, but if we do, better to log\n # it and keep operating best we can...\n import traceback\n traceback.print_exc()\n logging.error(\"*** Uncaught exception 1: %s\" % str(e))\n return\n\n logging.debug(\"event_monitor_loop ended: Global stop\")", "def sli_stats(dir):\n walker = ana.SittingWalker.from_dir(dir)\n dao = ana.RoboDAO(dir)\n stats = {}\n\n for sitting in walker:\n person = dao.get(sitting['person'])\n stamp = datetime.strptime(sitting['starttime'], \"%Y-%m-%dT%H:%M:%SZ\")\n [year, weekno, dayofweek] = stamp.isocalendar()\n ano = ident = inclass = teacher = 0\n if person.isteacher:\n teacher = 1\n elif len(person.teachers) > 0:\n inclass = 1\n elif person.id[0] == 'A':\n ano = 1\n else:\n ident = 1\n # score [all, ano, indent, inclass, teacher]\n score = [1, ano, ident, inclass, teacher]\n do_score(stats, 'weekofyear', year, weekno, score)\n do_score(stats, 'dayofweek', year, dayofweek, score)\n do_score(stats, 'hourofday', year, stamp.hour, score)\n return stats", "def refresh_probe(self, widget, data=None):\n\t\tself.setup_monitor()", "def compute_metrics(self):\n pass", "def watch(self):\n all_rss_feeds = [feed for feed in models.RSSFeed.query.all()]\n\n for rss_feed in all_rss_feeds:\n rss_feed.aggregate()", "def _print_stat(self):\n if 0 < self.print_stats_interval < 
time.monotonic() - self.last_stat_print_time:\n if self._file_paths:\n self._log_file_processing_stats(self._file_paths)\n self.last_stat_print_time = time.monotonic()", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "def _compute_global_stats():\n global_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Check how many HITs have been completed. We now consider a HIT to be\n # completed once it has been annotated by one or more annotators.\n #\n # Before we required `hit.users.count() >= 3` for greater overlap.\n hits_completed = HIT.objects.filter(mturk_only=False, completed=True).count()\n \n # Check any remaining active HITs which are not yet marked complete.\n for hit in HIT.objects.filter(active=True, mturk_only=False, completed=False):\n if hit.users.count() >= 1:\n hits_completed = hits_completed + 1\n hit.completed = True\n hit.save()\n \n # Compute remaining HITs for all language pairs.\n hits_remaining = HIT.compute_remaining_hits()\n \n # Compute number of results contributed so far.\n ranking_results = RankingResult.objects.filter(\n item__hit__completed=True, item__hit__mturk_only=False)\n \n from math import factorial\n system_comparisons = 0\n for result in ranking_results:\n result.reload_dynamic_fields()\n # TODO: this implicitly counts A=B comparisons for multi systems.\n # Basically, inflating the number of pairwise comparisons... Fix!\n combinations = factorial(result.systems)/(factorial(result.systems-2) * 2) if result.systems > 2 else 0\n system_comparisons = system_comparisons + combinations\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # Compute average/total duration over all results.\n durations = RankingResult.objects.all().values_list('duration', flat=True)\n total_time = sum([datetime_to_seconds(x) for x in durations])\n avg_time = total_time / float(hits_completed or 1)\n avg_user_time = total_time / float(3 * hits_completed or 1)\n \n global_stats.append(('Users', len(wmt16_users)))\n global_stats.append(('Groups', len(groups)))\n global_stats.append(('HITs completed', '{0:,}'.format(hits_completed)))\n global_stats.append(('HITs remaining', '{0:,}'.format(hits_remaining)))\n global_stats.append(('Ranking results', '{0:,}'.format(ranking_results.count())))\n global_stats.append(('System comparisons', '{0:,}'.format(system_comparisons)))\n global_stats.append(('Average duration (per HIT)', seconds_to_timedelta(avg_time)))\n global_stats.append(('Average duration (per task)', seconds_to_timedelta(avg_user_time)))\n global_stats.append(('Total duration', seconds_to_timedelta(total_time)))\n \n # Create new status data snapshot\n TimedKeyValueData.update_status_if_changed('users', str(len(wmt16_users)))\n TimedKeyValueData.update_status_if_changed('groups', str(len(groups)))\n TimedKeyValueData.update_status_if_changed('hits_completed', str(hits_completed))\n TimedKeyValueData.update_status_if_changed('hits_remaining', str(hits_remaining))\n TimedKeyValueData.update_status_if_changed('ranking_results', str(ranking_results.count()))\n TimedKeyValueData.update_status_if_changed('system_comparisons', str(system_comparisons))\n 
TimedKeyValueData.update_status_if_changed('duration_per_hit', str(seconds_to_timedelta(avg_time)))\n TimedKeyValueData.update_status_if_changed('duration_per_task', str(seconds_to_timedelta(avg_user_time)))\n TimedKeyValueData.update_status_if_changed('duration_total', str(seconds_to_timedelta(total_time)))\n \n return global_stats", "def status():\n schedule_log(\"Starting Elasticsearch Monitor\")\n\n command_text = 'curl http://127.0.0.1:9200/_stats'\n\n schedule_log('Running: %s' % command_text)\n\n output, error = safe_run(command_text)\n\n try:\n data = json.loads(output)\n\n schedule_log('Loaded json, saving.')\n\n save(True, output, mongo_database(), mongo_collection(), output)\n except Exception as ex:\n schedule_log('Reporting as failed.')\n schedule_log('%s' % ex)\n schedule_log(output)\n error = '%s'\n\n if error:\n save(False, {}, mongo_database(), mongo_collection(), error)\n\n schedule_log('Finished')", "def refresh_all_information(self):\n self.refresh_config()\n seeds = [key for key in self.CONFIG.keys() if \"EMI\" in key]\n for seed in seeds:\n print \"Beginning Synchronisation for %s\" % seed\n self.synchronise_information(seed)", "def main(self):\n debug(\"Using %s\" % (self.PROC_DISKSTATS))\n\n initial = self.get_status()\n time.sleep(self.interval)\n final = self.get_status()\n\n # Get bytes/sec\n for d in self.partitions:\n r_diff = ((final[d].r_sectors - initial[d].r_sectors) * self.sector_size) / self.interval\n w_diff = ((final[d].w_sectors - initial[d].w_sectors) * self.sector_size) / self.interval\n final[d].r_rate = r_diff\n final[d].w_rate = w_diff\n \n # Status string\n msg = \" \".join([ \"%s (r: %d KB/s, w: %d KB/s)\" % (i.dev, i.r_rate / 1024, i.w_rate / 1024) for i in sorted(final.values(), key=lambda x:x.dev) ])\n performance = \" \".join([ \"'%s read'=%d '%s write'=%d\" % (i.dev, i.r_rate, i.dev, i.w_rate) for i in sorted(final.values(), key=lambda x:x.dev) ])\n\n return (EX_OK, msg, performance)", "def summarizeReactorStats(self):\n totalMass = 0.0\n fissileMass = 0.0\n heavyMetalMass = 0.0\n totalVolume = 0.0\n numBlocks = len(self.getBlocks())\n for block in self.getBlocks():\n totalMass += block.getMass()\n fissileMass += block.getFissileMass()\n heavyMetalMass += block.getHMMass()\n totalVolume += block.getVolume()\n totalMass = totalMass * self.powerMultiplier / 1000.0\n fissileMass = fissileMass * self.powerMultiplier / 1000.0\n heavyMetalMass = heavyMetalMass * self.powerMultiplier / 1000.0\n totalVolume = totalVolume * self.powerMultiplier\n runLog.extra(\n \"Summary of {}\\n\".format(self)\n + tabulate.tabulate(\n [\n (\"Number of Blocks\", numBlocks),\n (\"Total Volume (cc)\", totalVolume),\n (\"Total Mass (kg)\", totalMass),\n (\"Fissile Mass (kg)\", fissileMass),\n (\"Heavy Metal Mass (kg)\", heavyMetalMass),\n ],\n tablefmt=\"armi\",\n )\n )", "def monitor(self, sec=1):\n lps_heading = \"{: ^12}\".format(\"flow lookup/sec\")\n mps_heading = \"{: ^12}\".format(\"flow match/sec\")\n c_rate_heading = \"{: ^5}\".format(\"cache hit rate\")\n table = PrettyTable([u'name', mps_heading, lps_heading, c_rate_heading])\n table.align[u'name'] = 'c'\n table.align[mps_heading] = 'r'\n table.align[lps_heading] = 'r'\n table.align[c_rate_heading] = 'l'\n table.sortby = u'name'\n\n while self.exit_loop != True:\n self.get_data(calc_throughput=True)\n os.system('clear')\n print self.timestamp.strftime('%Y/%m/%d %H:%M:%S')\n table = table[0:0]\n for brdata in self.data.itervalues():\n table.add_row([brdata[u'name'],\n brdata[u'match_per_sec'],\n 
brdata[u'lookup_per_sec'],\n brdata[u'cache_hitrate']])\n print table\n time.sleep(sec)", "def compute_stats(self, train_loader):\n for i, stage in enumerate(self.stages):\n stage.encoder.spectrogram.compute_stats(train_loader, i)", "def run(self):\n self.monitor.start()", "def monitor(self, interval=15):\r\n self.old_files = {}\r\n while True:\r\n try:\r\n self.query()\r\n time.sleep(interval)\r\n print(\"scanning\")\r\n except KeyboardInterrupt:\r\n self.close()\r\n except:\r\n pass", "def monitorAll(self):\n\n websites = self.user.mySites.values()\n\n # subprocesses to get the requests logs\n self.processes = [Process(target=self.monitorOne, args=(website,)) for website in websites]\n\n for process in self.processes:\n process.daemon = True\n\n for process in self.processes:\n process.start()\n\n for process in self.processes:\n process.join()\n\n return", "def collect_stats(self):\n\n df_avg, self.transport_df, self.customer_df, self.manager_df, self.station_df = self.get_stats_dataframes()\n\n columns = []\n if self.config.simulation_name:\n df_avg[\"Simulation Name\"] = self.config.simulation_name\n columns = [\"Simulation Name\"]\n columns += [\"Avg Waiting Time\", \"Avg Total Time\", \"Simulation Time\"]\n if self.config.max_time:\n df_avg[\"Max Time\"] = self.config.max_time\n columns += [\"Max Time\"]\n columns += [\"Simulation Finished\"]\n self.df_avg = df_avg[columns]", "def output_stats(self):\n elapsed = self.timer.elapsed.total_seconds()\n count = self.copied + self.errored\n total = self.total\n # Time per key in milliseconds\n avg = round(elapsed / count * 1000, 3)\n # Time remaining in seconds\n remaining = 1.0 * elapsed / count * (total - count)\n # Time remaining in minutes\n remaining = round(remaining / 60.0, 1)\n # Time taken in minutes\n elapsed = round(elapsed / 60.0, 1)\n\n self.log.info(f\"{self.prefix}: {avg}ms avg, {elapsed}min passed, \"\n f\"{remaining}min remaining. 
({count:,}/{total:,})\")", "def stats(self):\n raise NotImplementedError(\"Must implement in frontend subclass.\")", "def process_monitor(self):\n try:\n self.client.evaluate_triggers()\n except HTTPError as err:\n pass", "def disease_stats_rebuild_all_aggregates():\n\n # Check to see whether an existing task is running and if it is then kill it\n db = current.db\n ttable = db.scheduler_task\n rtable = db.scheduler_run\n wtable = db.scheduler_worker\n query = (ttable.task_name == \"disease_stats_update_aggregates\") & \\\n (rtable.task_id == ttable.id) & \\\n (rtable.status == \"RUNNING\")\n rows = db(query).select(rtable.id,\n rtable.task_id,\n rtable.worker_name)\n now = current.request.utcnow\n for row in rows:\n db(wtable.worker_name == row.worker_name).update(status=\"KILL\")\n db(rtable.id == row.id).update(stop_time=now,\n status=\"STOPPED\")\n db(ttable.id == row.task_id).update(stop_time=now,\n status=\"STOPPED\")\n\n # Delete the existing aggregates\n current.s3db.disease_stats_aggregate.truncate()\n\n # Read all the disease_stats_data records\n dtable = db.disease_stats_data\n query = (dtable.deleted != True)\n # @ToDo: deployment_setting to make this just the approved records\n #query &= (dtable.approved_by != None)\n records = db(query).select(dtable.parameter_id,\n dtable.date,\n dtable.value,\n dtable.location_id,\n )\n\n # Fire off a rebuild task\n current.s3task.run_async(\"disease_stats_update_aggregates\",\n vars = {\"records\": records.json(),\n \"all\": True,\n },\n timeout = 21600 # 6 hours\n )", "def __do_analysis(self):\n #Step 1: connect to mongodb and pick a streamer\n dbclient = db_connect.DBClient()\n streamer_data = dbclient.analyze_number_of_stream_viewers(self.streamer)\n streamer_messeges_data = dbclient.analyzeStream(self.streamer)\n\n timearr = []\n messagesarr = []\n streamer_timearr = []\n num_chattersarr = []\n\n #create time and messages array for plotting purposes\n for entry in streamer_messeges_data:\n timearr.append(entry['start_time'])\n messagesarr.append(entry['messeges_count'] * entry['messeges_count'])\n #print(entry['start_time'])\n\n #create time and chatters array for plotting purposes\n for entry in streamer_data:\n streamer_timearr.append(entry['deltatime_from_start_of_clip'])\n num_chattersarr.append(entry['num_viewers'])\n\n # print('start time: ' + str(timearr[0]))\n # print('end time: ' + str(timearr[-1]))\n # print('duration: ' + str(timearr[-1] - timearr[0]))\n # print('average views/min = ' + str(sum(messagesarr) / len(messagesarr)))\n\n average_message_count = sum(messagesarr) / len(messagesarr)\n\n averagearr = []\n plotting_time_arr = []\n labelarr = []\n\n for i in range(len(timearr)):\n averagearr.append(average_message_count*1.8)\n #print(str(timearr[i]) + ' converts to ' + str(datetime.datetime(2020, 1, 1, 0, 0) + timearr[i]))\n plotting_time_arr.append(datetime.datetime(2020, 1, 1, 0, 0) + timearr[i])\n labelarr.append(str(i))\n\n plotting_streamer_timearr = []\n for i in range(len(streamer_timearr)):\n plotting_streamer_timearr.append(datetime.datetime(2020, 1, 1, 0, 0) + streamer_timearr[i])\n\n #plot messages and cuttoff\n messeges_over_time_fig = pyplot.figure(1)\n messeges_over_time_fig.set_figheight(15)\n messeges_over_time_fig.set_figwidth(30)\n messeges_over_time_fig.suptitle(self.streamer + \"'s video data\")\n messeges_over_time_sub = messeges_over_time_fig.add_subplot(211)\n\n pyplot.plot(plotting_time_arr,messagesarr,label='messages/min')\n dots = 
pyplot.plot(plotting_time_arr,messagesarr,'bo',label='messages/min')\n\n #label dots\n count = 0\n last_entry_was_above_line = False\n for i in range(len(plotting_time_arr)):\n #print(str(count) +': comparing ' + str(messagesarr[i]) + ' with ' + str(averagearr[i]))\n if(messagesarr[i] > averagearr[i]):\n if(last_entry_was_above_line):\n #Don't increment the count because this is part of the same clip\n count = count\n else:\n #new clip above the line, increment clip count\n count = count + 1\n messeges_over_time_sub.annotate(count,xy=(plotting_time_arr[i],messagesarr[i]))\n last_entry_was_above_line = True\n else:\n last_entry_was_above_line = False\n # messeges_over_time_sub.annotate('NA',xy=(plotting_time_arr[i],messagesarr[i]))\n\n #finish plotting\n pyplot.plot(plotting_time_arr, averagearr,'',label='average')\n pyplot.gcf().autofmt_xdate()\n pyplot.ylabel('Messeges*Messeges')\n pyplot.xlabel('Time')\n\n viewers_over_time_sub = messeges_over_time_fig.add_subplot(212)\n\n pyplot.plot(plotting_streamer_timearr,num_chattersarr,label='num chatters')\n pyplot.ylabel('Chatters')\n pyplot.xlabel('Time')\n\n pyplot.tight_layout()\n pyplot.savefig(output_file_location+self.streamer+'.png')\n print('saved chart to ' + output_file_location+self.streamer+'.png')\n # pyplot.show()\n return average_message_count, streamer_messeges_data", "def get_volume_stats(self, refresh=False):\n if refresh or not self.cluster_stats:\n try:\n self._update_cluster_stats()\n except exception.DateraAPIException:\n LOG.error(_LE('Failed to get updated stats from Datera '\n 'cluster.'))\n return self.cluster_stats", "def _monitor_loop(self):\n while self._continue_running():\n for wl in self._workloads:\n if not wl.running():\n self.log.info('%-20s FAILED', wl.name())\n self._restart_workload(wl)\n else:\n self.log.info('%-20s OK', wl.name())\n\n time.sleep(self._monitor_delay)", "def run(self):\n print('Starting CloudWatchLogsMonitor.')\n\n # Initialize pool for multithreading.\n # Use ThreadPool for shared memory (used for keeping track of last polled timestamp)\n pool = ThreadPool()\n\n while True:\n\n # Check for new LogGroups and LogStreams.\n self.update()\n\n for log_group in self.log_groups:\n # For every log group get and append log events to log file.\n # This is run in parallel and is non-blocking.\n pool.map_async(LogStream.get_and_append_log_events, log_group.log_streams)\n\n # These lines run the agent synchronously.\n # You need to comment out the pool.map_async line above if using synchronous loop.\n # for log_stream in log_group.log_streams:\n # LogStream.get_and_append_log_events(log_stream)\n\n # Sleep for the polling interval.\n time.sleep(self.default_polling_interval)", "def _calc_resource_stats(self, interval):\n result = {}\n\n if 'mem' in self.metrics:\n result['mem'] = self._get_mem_info()\n\n if 'disk-space' in self.metrics:\n result['disk-space'] = self.__get_disk_usage(self.engine.artifacts_dir).percent\n\n if 'engine-loop' in self.metrics:\n result['engine-loop'] = self.engine.engine_loop_utilization\n\n if 'conn-all' in self.metrics:\n try:\n # take all connections without address resolution\n output = subprocess.check_output(['netstat', '-an'])\n output_lines = stream_decode(output).split('\\n') # in py3 stream has 'bytes' type\n est_lines = [line for line in output_lines if line.find('EST') != -1]\n result['conn-all'] = len(est_lines)\n except BaseException as exc:\n self.log.debug(\"Failed to get connections info: %s\", exc)\n result['conn-all'] = 0\n\n if 'cpu' in self.metrics:\n 
result['cpu'] = self._get_cpu_percent()\n\n if 'bytes-recv' in self.metrics or 'bytes-sent' in self.metrics:\n net = self.__get_net_counters()\n if net is not None:\n tx_bytes = int((net.bytes_sent - self._net_counters.bytes_sent) / float(interval))\n rx_bytes = int((net.bytes_recv - self._net_counters.bytes_recv) / float(interval))\n self._net_counters = net\n else:\n rx_bytes = 0.0\n tx_bytes = 0.0\n\n if 'bytes-recv' in self.metrics:\n result['bytes-recv'] = rx_bytes\n if 'bytes-sent' in self.metrics:\n result['bytes-sent'] = tx_bytes\n\n if 'disk-read' in self.metrics or 'disk-write' in self.metrics:\n disk = self.__get_disk_counters()\n if disk is not None:\n dru = int((disk.read_bytes - self._disk_counters.read_bytes) / float(interval))\n dwu = int((disk.write_bytes - self._disk_counters.write_bytes) / float(interval))\n self._disk_counters = disk\n else:\n dru = 0.0\n dwu = 0.0\n\n if 'disk-read' in self.metrics:\n result['disk-read'] = dru\n if 'disk-write' in self.metrics:\n result['disk-write'] = dwu\n\n return result", "def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 
5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]", "def monitor(self):\n logging.debug(\"monitor entered\")\n # monitor machines...\n # first, get a list of machine IDs\n res = progress_table(self.machines)\n return res", "def groom_outages(self):\n #######################################################\n # This is the general flow for the groom process\n # When the queue is hit then it will have the start and end times along with the various parameters\n # needed for the outage event calculation.\n # When the queue item comes in then these steps happen.\n #\n # h) temporal filter : a string that represents time domain filter coefficients.\n # The string will be of this form:\n # \"[1,0; .8,24; .3, 60]\"\n # \"[w0,t0; w1,t1; w2, t2; ...]\" were w0 is the weight (typically between 0 and 1)\n # and t0 is the historical time\n # (in minutes) from the event. 
In this example the following rules are used:\n # At the event time, the alarm will be weighted with 1, 24 minutes before the event the alarm\n # will be weighted by .8, 60 minutes before the event the alarm will be weighted by 0.3.\n # For events that happen between the time weights a linear interpolation will be used.\n # i) use reputation (flag) : a flag that says whether to use the reputation of the ONTs for voting\n self.start_time = time.time()\n self.my_local_logger.debug(\"GROOMING NOW\")\n # lat = 41.2693778\n # lon = -73.8773389\n # radius = 1.0 # config.START_RADIUS # = 0.12\n # #################################################\n # STEP 1 Pull items off the queue.\n # self.pull_q_groom_command()\n self.groomer_state = \"1:GROOM\"\n groom_queue_len = len(self.local_q)\n if groom_queue_len == 0:\n self.my_local_logger.debug(\"NOTHING IN LOCAL QUEUE, returning\")\n self.groomer_state = \"1.0:GROOM_RETURN_EARLY\"\n return\n self.my_local_logger.debug(\"------------------ processing all %d items in the local_q\" % groom_queue_len)\n for _ in range(groom_queue_len):\n # STEP 1) Pull items off the queue. The queue will consist of:\n # a) time : in in microseconds that is desired for calculating the outage\n # b) lat : latitude in decimal degrees\n # c) lon : longitude in decimal degrees\n # d) circuitID : circuit ID filter to be used for identification of a\n # specific circuit within the area of interest\n # e) assetID : asset ID filter to be used within the area of interest\n # f) number of votes : number of votes to be used for qualifying the outage\n # g) spatial filter : a string that represents radial filter coefficients. This is a string of the form:\n # \"[1,0; .2,.2; .3,.01]\"\n # \"[w0,d0; w1,d1; w3,d3; ... ]\" where w0 is the weight (typically 0 to 1)\n # and d0 is the distance in miles or\n # whatever the units are set to in the config file.\n # The distance is the distance along a line that runs through the asset lat/lon and is parallel to the\n # nearest upstream circuit segment. 
The ONT distance is projected to this circuit line and is filtered\n # by the same spatial filter coefficients.\n # In addition to the spatial filter the ONTs are weighted by their reputation\n # (if the flag is set) which is\n # calculated by an internally learned algorithm.\n self.my_local_logger.debug(\" Grooming local_q, size = %d\" % len(self.local_q))\n top_of_q_data = copy.copy(self.local_q.popleft()) # was popleft\n self.groomer_state = \"1.1:GROOM_POP_QUEUE\"\n self.my_local_logger.info(\"Got a local queue item.\")\n if \"ttl\" in top_of_q_data.keys():\n ttl = top_of_q_data[\"ttl\"]\n else:\n ttl = self.ttl\n if top_of_q_data[\"payload\"]['radius'] != self.working_radius:\n self.resolution = compute_resolution(top_of_q_data[\"payload\"][\"radius\"])\n this_cell_guid, this_timestamp_guid = self.compute_cell_guid(top_of_q_data[\"payload\"], self.resolution)\n keys = self.get_shared_data('cell_collection_dict_keys')\n collection_set = self.get_shared_data('cell_collection_set')\n ##################################################\n # STEP 2) Look at the GUID generator for the lat and lon and see if the shared\n # memory contains a cell structure for this item.\n if this_cell_guid in keys: # my_shared_data['cell_collection_dict'].keys():\n # 2.1) If it does contain the GUID then determine the state of that cell.\n # 2.2) If the time stamp GUID of this cell GUID is within the resolution outage\n # machine then continue with step 4.\n self.groomer_state = \"1.2:GROOM_FOUND_SHARED_DATA\"\n self.my_local_logger.debug(\"This cell is already in shared memory, \"\n \"and is fully populated checking using a copy of it\")\n cell = self.get_shared_data('cell_collection_dict', this_cell_guid)\n self.my_local_logger.debug(\"EXISTS: %s[%f,%f]TTL=%d\" %\n (this_cell_guid, cell[\"lat_lon\"][0], cell[\"lat_lon\"][1], cell[\"ttl\"]))\n else: # 2.3) If it does not contain the GUID or the time stamp GUID does not match then go to step 3.\n # STEP 3) Query the API and find all utility assets within the region of interest\n # (defined by a config parameter as the starting zoom level in miles)\n # These will include house, transformers, poles, wires and so on.\n # The first 2 letters of the assetID will be the item type. Save this cell to the shared memory set\n # From this collection of assets create a SET of items in a shared queue that\n # holds these items until so that other processes don't work on these items at the same time.\n # The items will be filtered by assetID (item 1e) and circuitID (item 1d) if these fields are filled in.\n cell = self.build_new_cell(this_cell_guid, top_of_q_data[\"payload\"], ttl)\n self.save_cell_in_shared_mem(this_cell_guid, cell)\n self.my_local_logger.debug(\"CREATE: %s[%f,%f]TTL=%d\" %\n (this_cell_guid, cell[\"lat_lon\"][0], cell[\"lat_lon\"][1], ttl))\n self.groomer_state = \"1.3:GROOM_BUILD_NEW_CELLS\"\n\n # self.plot_assets()\n\n # At this point the cell has been created and tested to be sure that its the one we want.\n # Now examine the neighboring cells from this cells collection:\n\n # STEP 4) Using the result of step 3 the cell is ready to be processed.\n # 4.1) The next step is to look at each of the 6 neighboring cells.\n # This is done by examining the 6 cells and determining their state.\n # 4.1.1) Check the TTL count of this cell. 
If the TTL is zero continue to the next cell\n # in the incoming Queue.\n self.groomer_state = \"1.4:GROOM_PROPAGATE_CELL\"\n\n if cell['ttl'] != 0:\n for i, items in enumerate(cell['neighbors']): # the 6 nearest neighbors\n this_neighbor_cell = items[0]\n angle = items[1]\n # The six neighbor cells are initially set to zero\n # this_cell = {'neighbors': [[\"\",0], [\"\",60], [\"\",120], [\"\",180], [\"\",240],[\"\",300]],\n # 'assets': {},\n # 'onts': {},\n # 'circuits': {},\n # 'state': 'create',\n # 'lat_lon': [lat, lon],\n # 'radius': radius,\n # 'groom_time': groom_time\n # }\n distance = 2 * top_of_q_data[\"payload\"][\"radius\"]\n if not this_neighbor_cell:\n # We need to copy each of the neighbor cells to make sure we get a unique data structure\n neighbor_cell_message = copy.copy(top_of_q_data)\n self.my_local_logger.debug(\"%s neighbor[%d] is empty, [%f][%f], filling it now\" %\n (this_cell_guid, i, cell[\"lat_lon\"][0], cell[\"lat_lon\"][1]))\n new_lat, new_lon = move_to_lat_lon(cell[\"lat_lon\"][0], cell[\"lat_lon\"][1], distance, angle)\n # jump out of the loop if the cell is outside the region of interest\n company_name = top_of_q_data['payload']['company']\n if company_name not in self.utility_region.keys():\n self.my_local_logger.error(\"Skipping cell rebroadcast \"\n \"because company_name='%s' is not in utility_region.\" %\n company_name)\n self.groomer_state = \"1.5.0:GROOM_ABORT_PROPAGATE\"\n continue\n\n if (new_lat < self.utility_region[company_name]['min_latitude']) or \\\n (new_lat > self.utility_region[company_name]['max_latitude']) or \\\n (new_lon > self.utility_region[company_name]['max_longitude']) or \\\n (new_lon < self.utility_region[company_name]['min_longitude']):\n # Here is where the outage time can be advanced by 2 weeks and run again.\n if not g_config.IS_DEPLOYED:\n print \"Skipping neighbor cell rebroadcast at \" \\\n \"lat = %f, lon = %f because outside utility region.\" % \\\n (new_lat, new_lon)\n self.my_local_logger.info(\"Skipping neighbor cell rebroadcast at \"\n \"lat = %f, lon = %f because outside utility region.\" %\n (new_lat, new_lon))\n self.groomer_state = \"1.5.1:GROOM_ABORT_PROPAGATE\"\n continue\n neighbor_cell_message[\"payload\"][\"longitude\"] = new_lon\n neighbor_cell_message[\"payload\"][\"latitude\"] = new_lat\n new_cell_guid, new_timestamp_guid = self.compute_cell_guid(neighbor_cell_message[\"payload\"],\n self.resolution)\n if new_cell_guid not in collection_set:\n # STEP 5) Queue up a grooming process for neighboring cells that\n # allows another process to pick up the outage calculation for the rest of the circuit.\n # The neighboring cell is defined by outage location +/- 1 one patch area of\n # interest in 6 hexagonal directions. This will create a small overlap on the cell corners.\n self.groomer_state = \"1.5.1:GROOM_QUEUE_NEIGHBOR\"\n self.my_local_logger.debug(\"queue length X = %d\" % len(self.local_q))\n self.mark_cell_in_shared_memory(new_cell_guid)\n if cell['ttl'] == -1:\n # If the TTL count is -1 then this is a full propagation list so this causes a\n # post (publish) of a new query. 
Then continue with the next cell.\n neighbor_cell_message[\"ttl\"] = -1\n else:\n # Decrease the TTL count and post (publish) a new query.\n # Then continue with the next cell.\n neighbor_cell_message[\"ttl\"] = cell['ttl'] - 1\n self.my_local_logger.debug(\" POST: %s[%f,%f]TTL=%d->%s[%f,%f]TTL=%d(%d)\" %\n (this_cell_guid, cell[\"lat_lon\"][0], cell[\"lat_lon\"][1], ttl,\n new_cell_guid, new_lat, new_lon, neighbor_cell_message[\"ttl\"],\n angle))\n ########################\n # This is the work around to just post the message back to the local_q instead of sending it\n # out to the rabbit bus for parallel processing\n ####################################\n # BURNED BY PYTHON\n ####################################\n # The queue append does not copy the data, instead it just posts a pointer to the data\n # self.local_q.append(copy.deepcopy(neighbor_cell_message))\n # self.my_local_logger.debug(\"gueue length Y = %d\" % len(self.local_q)\n self.queue_to_publish(copy.deepcopy(neighbor_cell_message))\n else:\n self.groomer_state = \"1.5.2:GROOM_LINK_NEIGHBOR\"\n # time.sleep(1)\n self.my_local_logger.debug(\"Stitching %s's neighbor[%d]@[%f][%f] to this cell: %s\" %\n (this_cell_guid, i, cell[\"lat_lon\"][0], cell[\"lat_lon\"][1],\n new_cell_guid))\n self.my_local_logger.debug(\"SHARED: %s[%f,%f]TTL=%d->%s[%f,%f]TTL=%d (%d)\" %\n (this_cell_guid, cell[\"lat_lon\"][0], cell[\"lat_lon\"][1], ttl,\n new_cell_guid, new_lat, new_lon, cell['ttl'], angle))\n # If the cell is already in shared memory then just connect the cells neighbors\n cell['neighbors'][i] = [new_cell_guid, angle]\n self.save_cell_in_shared_mem(this_cell_guid, cell)\n\n # STEP 6) OUTAGE CALCULATION\n # at this point the outage region is contained within one cell.\n # This is the process of grooming the outage. The data is ready to be used for calculating the outage.\n # The filter algorithm was given above.\n # 6.1) First the temporal filter is applied to the assets in the cell\n self.groomer_state = \"1.6:GROOM_COMPUTE_OUTAGE\"\n t_cell = self.temporal_filter(cell)\n self.save_cell_in_shared_mem(this_cell_guid, t_cell)\n # 6.2) Second the spatial filter is applied to each assets in the cell\n s_cell = self.spatial_filter(t_cell)\n self.save_cell_in_shared_mem(this_cell_guid, s_cell)\n # 6.3) Once the filtered data is ready then the vote is applied to each ONT and the final vote is computed.\n v_cell = self.vote_on_assets(s_cell,\n top_of_q_data['payload']['temporal'],\n top_of_q_data['payload']['spatial'],\n top_of_q_data['payload']['votes'])\n self.save_cell_in_shared_mem(this_cell_guid, v_cell)\n # and the results is written back to the outage API.\n self.my_local_logger.info(\"Calling post_outage_on_asset.\")\n self.my_local_logger.info(\"Posting this payload: %s\" % json.dumps(top_of_q_data[\"payload\"]))\n self.post_outage_on_asset(v_cell, top_of_q_data[\"payload\"])\n\n self.end_time = time.time()\n elapsed_process_time = fmax(self.end_time - self.start_time, .001)\n self.groomer_state = \"0:IDLE\"\n self.groom_run_state = \"0:IDLE\"\n self.my_local_logger.info(\"Done. 
Elapsed time %f sec.\" % elapsed_process_time)", "def hourly_stats():\r\n count_total.delay()\r\n count_unique.delay()\r\n count_tags.delay()", "async def update_stats(self):\r\n\r\n\t\twhile True:\r\n\t\t\tlogging.info('Attempting to post server count')\r\n\t\t\ttry:\r\n\t\t\t\tawait self.dblpy.post_server_count()\r\n\t\t\t\tlogging.info(f'Posted server count ({len(self.bot.guilds)})')\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tlogging.exception(f'Failed to post server count\\n{type(e).__name__}: {e}')\r\n\t\t\tawait asyncio.sleep(1800)", "def refresh(self):\n self.active_member_count\n self.description\n self.lbmethod\n self.members\n self.minimum_active_member\n self.minimum_up_member\n self.slow_ramp_time\n self.statistics", "def get_stats(self):\n # pool.map needs an arg for each function that will be run\n dmx_mean = [self.dmx.mean()] * len(self.genome_paths)\n with ProcessingPool() as pool:\n results = pool.map(genome.mp_stats, self.genome_paths, dmx_mean)\n self.stats = pd.concat(results)\n self.stats.to_csv(self.stats_path)", "def runScheduler(self):\n\n for source in self.sources:\n intervals = [\n int(self.sources[source]['metrics'][x]['interval']) for x\n in range(0, len(self.sources[source]['metrics']))]\n sourceInterval = self.gcd(intervals)\n self.sources[source]['sourceInterval'] = sourceInterval\n self.logger.debug(self.sources[source]['metrics'])\n\n self.scheduler.add_job(\n self.getDriverData, 'interval', args=[\n self.sources[source]['metrics']],\n seconds=sourceInterval)", "def _monitor(cls):\n while not cls._stop_thread.is_set():\n cls.shrink_cache()\n time.sleep(random.random() * 10)", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def stat(**kwargs):\n print(\"output stats\")", "def autoAnalyze(self):\n print(\"Perfoming full automatic analysis...\")\n t1=time.perf_counter()\n self.cleanUp()\n self.figure_rois()\n self.figure_roi_inspect_all()\n self.figure_dGoR_roi(showEach=False,saveAs=self.folderSave+\"/avg.png\")\n self.figure_dGoR_roi(showEach=True,saveAs=self.folderSave+\"/each.png\")\n self.index()\n print(\"analysis completed in %.02f sec\"%(time.perf_counter()-t1))", "def collectStat(self, thread):\n\t\t# update average page load time\n\t\tif self.updated_count == 0:\n\t\t\tself.average_time = thread.load_time\n\t\telse:\n\t\t\tself.average_time = (self.average_time * self.updated_count + thread.load_time) / (self.updated_count + 1)\n\t\t# update stitistics by HTTP code\n\t\tif thread.code not in self.code_statistics:\n\t\t\tself.code_statistics[thread.code] = 1 \n\t\telse:\n\t\t\tself.code_statistics[thread.code] += 1\n\t\t# update count of processed pages\n\t\tself.updated_count += 1", "def compile_global_stats(results_dir='./../data/*/*cr_sizes*hdf5'):\n\n flist = glob.glob(results_dir)\n output = defaultdict(list)\n flist = [f for f in flist if 'nicmos' not in f]\n print(flist)\n flist.append('./../data/STIS/stis_cr_sizes.hdf5')\n results = [dask.delayed(tally_stats)(f) for f in flist]\n results = list(dask.compute(*results, scheduler='processes'))\n\n for instr, data in results:\n output[instr].append(data)\n\n for key in output.keys():\n cr_count = 0\n img_count = 0\n total_exptime = 0\n for val in 
output[key]:\n cr_count += val.cr_count\n img_count += val.img_count\n total_exptime += val.total_exptime\n output[key] = [cr_count, img_count, total_exptime]\n\n df = pd.DataFrame(output, index=['cr_count', 'img_count', 'total_exptime'])\n print(df)\n print('Total CR count: {}'.format(df.loc['cr_count', :].sum()))\n print('Total number of images analyzed: {}'.format(df.loc['img_count', :].sum()))\n print('Cumulative exposure time: {}'.format(df.loc['total_exptime', :].sum()))", "def watch_runs(run_storage_base, options, tight_loop_timer=1):\n import time\n import crython\n\n # @crython.job(minute=range(0, 60, frequency), second=0)\n # @crython.job(second=[5]) # once every minute\n # @crython.job(second=range(0, 60, 5)) # once every 5 seconds\n @crython.job(minute=range(0, 60, 15)) # once every 15 mins\n def _process_all_cron():\n logging.info('Running scheduled autoprocessing on: %s', run_storage_base)\n process_all_runs(run_storage_base, options)\n\n crython.start()\n\n try:\n while True:\n time.sleep(tight_loop_timer)\n except KeyboardInterrupt:\n logging.info(\"Stopped watching: %s\" % run_storage_base)\n pass", "def start(self):\n while True:\n LogService.log_info(\"aggregator\", \"Creating statistics\")\n self.create_statistics()\n LogService.log_info(\"aggregator\", \"Cleaning up\")\n self.cleanup_measurements()\n LogService.log_info(\"aggregator\", \"Sleeping for 60 minutes\")\n time.sleep(60*60)", "def monitor(self):\n while not self.terminated:\n try:\n if (time.time() - self.updated_time) < 5:\n messages = self.messages.copy()\n # procs = np.min([ len(messages), 9 ]) + 1\n # pool = ThreadPool(procs)\n # pool.map(self.process, messages)\n # pool.close()\n # pool.join()\n for message in messages:\n self.process(message)\n elif self.ws:\n self.updated_time += 10\n self.ws.close()\n except Exception as e:\n self.on_error(None, \"Monitoring Error: {}\".format(e))\n continue\n finally:\n time.sleep(0.1)", "def queueStatusAll():", "def start_monitor(self, collector):\n pass", "def monitor_cmd(tkt_spool_dir, scandir, location):\n if location:\n _LOGGER.info('Using location filter: %s', location)\n else:\n _LOGGER.info('No location filter, configuring all cells.')\n\n admin_cell = context.GLOBAL.admin.cell()\n while True:\n for cell in admin_cell.list({}):\n celluser = cell['username']\n cellname = cell['_id']\n celllocation = cell.get('location', '')\n if location and not fnmatch.fnmatch(celllocation, location):\n _LOGGER.info(\n 'Skip cell by location: %s %s', cellname, celllocation\n )\n continue\n\n _configure_locker(tkt_spool_dir, scandir, cellname, celluser)\n\n # TODO: need to stop/remove extra services. 
For now, extra\n # services are removed on group restart.\n\n supervisor.control_svscan(scandir, (\n supervisor.SvscanControlAction.alarm,\n supervisor.SvscanControlAction.nuke\n ))\n time.sleep(60)", "def monitor(self, **kwargs):\n self.show_info(monitor=True, **kwargs)", "def poll(self):\n\n stats = {}\n for vcenter in self.vcenters:\n stats[vcenter] = self.poll_vcenter(vcenter)\n\n return stats", "def run_all(self):\n self.formatter.section_start('Scratch Memory Info')\n self.formatter.section_start('Per priority')\n self.analyse_per_priority()\n self.formatter.section_end()\n self.formatter.section_start('Per task')\n self.analyse_per_task()\n self.formatter.section_end()\n self.formatter.section_end()", "def _printSummary(self):\n\t\t### COP OUT\n\t\tif self.params['background'] is True:\n\t\t\tself.stats['count'] += 1\n\t\t\treturn\n\n\t\t### THIS NEEDS TO BECOME MUCH MORE GENERAL, e.g. Peaks\n\t\ttdiff = time.time()-self.stats['startseries']\n\t\tif not self.params['continue'] or tdiff > 0.1:\n\t\t\tcount = self.stats['count']\n\t\t\t#if(count != self.stats['lastcount']):\n\t\t\tsys.stderr.write(\"\\n\\tSUMMARY: \"+self.functionname+\"\\n\")\n\t\t\tself._printLine()\n\t\t\tsys.stderr.write(\"\\tTIME: \\t\"+apDisplay.timeString(tdiff)+\"\\n\")\n\t\t\tself.stats['timesum'] = self.stats['timesum'] + tdiff\n\t\t\tself.stats['timesumsq'] = self.stats['timesumsq'] + (tdiff**2)\n\t\t\ttimesum = self.stats['timesum']\n\t\t\ttimesumsq = self.stats['timesumsq']\n\t\t\tif(count > 1):\n\t\t\t\ttimeavg = float(timesum)/float(count)\n\t\t\t\ttimestdev = math.sqrt(float(count*timesumsq - timesum**2) / float(count*(count-1)))\n\t\t\t\ttimeremain = (float(timeavg)+float(timestdev))*self.stats['seriesleft']\n\t\t\t\tsys.stderr.write(\"\\tAVG TIME: \\t\"+apDisplay.timeString(timeavg,timestdev)+\"\\n\")\n\t\t\t\t#print \"\\t(- TOTAL:\",apDisplay.timeString(timesum),\" -)\"\n\t\t\t\tif(self.stats['seriesleft'] > 0):\n\t\t\t\t\tsys.stderr.write(\"\\t(- REMAINING TIME: \"+apDisplay.timeString(timeremain)+\" for \"\n\t\t\t\t\t\t+str(self.stats['seriesleft'])+\" series -)\\n\")\n\t\t\t#print \"\\tMEM: \",(mem.active()-startmem)/1024,\"M (\",(mem.active()-startmem)/(1024*count),\"M)\"\n\t\t\tself.stats['count'] += 1\n\t\t\tself._printLine()", "def __show_all_metrics(self):\n for obj in self.metrics_list:\n self.__print_metrics_info(obj.get_name())\n print()", "async def check_status(self):\n while True:\n async with self._loop_lock:\n new_monitor_processes = {}\n for class_name in self.monitor_processes:\n monitor = self.monitor_processes[class_name][\"process\"]\n if monitor.poll() is not None:\n log = f\"Monitor {class_name} has stopped with code: {monitor.returncode}\"\n if monitor.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Monitor \" + class_name,\n monitor.returncode,\n monitor.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n 
else:\n self.general_logger.info(log)\n else:\n new_monitor_processes[class_name] = self.monitor_processes[\n class_name\n ]\n self.monitor_processes = new_monitor_processes\n\n new_scraper_processes = {}\n for class_name in self.scraper_processes:\n scraper = self.scraper_processes[class_name][\"process\"]\n if scraper.poll() is not None:\n log = f\"Scraper {class_name} has stopped with code: {scraper.returncode}\"\n if scraper.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Scraper \" + class_name,\n scraper.returncode,\n scraper.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_scraper_processes[class_name] = self.scraper_processes[\n class_name\n ]\n self.scraper_processes = new_scraper_processes\n await asyncio.sleep(1)", "def refresh():\n DB.drop_all()\n DB.create_all()\n df_meas = open_api.measurements(city='Los Angeles', parameter='pm25', df=True)\n df_meas['date.utc'] = df_meas['date.utc'].astype(str)\n create_DB_records(df_meas)\n DB.session.commit()\n message = 'Data refreshed on: ' + str(datetime.datetime.now())\n over9s = Record.query.filter(Record.value > 9)\n recs = Record.query.filter(Record.id < 20)\n over5s = Record.query.filter(Record.value > 5)\n return render_template('base.html', message=message, over9s=over9s, over5s=over5s, recs=recs)", "def updateall(self, params):\n for gauge in self.gauges:\n self.safexec(gauge.updateall, params)", "def report(self):\n\n job_summary = {}\n for job in self._jobs:\n \n if job.step_name not in job_summary:\n job_summary[ job.step_name ] = {}\n job_summary[ job.step_name ][ 'DONE' ] = 0\n job_summary[ job.step_name ][ 'RUNNING' ] = 0\n job_summary[ job.step_name ][ 'QUEUING' ] = 0\n job_summary[ job.step_name ][ 'FAILED' ] = 0\n job_summary[ job.step_name ][ 'UNKNOWN' ] = 0\n job_summary[ job.step_name ][ 'max_mem' ] = 0\n job_summary[ job.step_name ][ 'cputime' ] = 0\n\n if job.status == Job_status.FINISHED:\n job_summary[ job.step_name ][ 'DONE' ] += 1\n if job.cputime is not None:\n job_summary[ job.step_name ]['cputime'] += int(job.cputime)\n\n if job.max_memory is not None and job.max_memory > job_summary[ job.step_name ][ 'max_mem']:\n job_summary[ job.step_name ][ 'max_mem'] = int(job.max_memory)\n\n elif job.status == Job_status.RUNNING:\n job_summary[ job.step_name ][ 'RUNNING' ] += 1\n elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:\n job_summary[ job.step_name ][ 'QUEUING' ] += 1\n elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:\n job_summary[ job.step_name ][ 'FAILED' ] += 1\n else:\n job_summary[ job.step_name ][ 'UNKNOWN' ] += 1\n\n\n\n local_time = strftime(\"%d/%m/%Y %H:%M\", time.localtime())\n \n\n pickle_file = \"{}.{}\".format(self.pipeline.project_name, self.pipeline._pid)\n\n print(\"[{} @{} {}]\".format( 
local_time,self.pipeline._hostname , pickle_file))\n\n print(\"{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}\".format(\"Run stats\", \"Runtime\", \"Max Mem\", \"D\",\"R\",\"Q\",\"F\",\"U\"))\n\n for step in sorted(self.pipeline._workflow._analysis_order, key=self.pipeline._workflow._analysis_order.__getitem__):\n if step not in job_summary:\n continue\n\n print(\"{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}\".format(step, \n self.format_time(job_summary[ step ]['cputime']),\n self.format_memory(job_summary[ step ]['max_mem']),\n job_summary[ step ][ 'DONE' ],\n job_summary[ step ][ 'RUNNING' ],\n job_summary[ step ][ 'QUEUING' ],\n job_summary[ step ][ 'FAILED' ],\n job_summary[ step ][ 'UNKNOWN' ]))" ]
[ "0.6406339", "0.63248503", "0.615353", "0.60156983", "0.5939783", "0.58072263", "0.57075703", "0.56916606", "0.5660787", "0.56502295", "0.56461763", "0.5633014", "0.55788845", "0.54534495", "0.5422858", "0.54086626", "0.54054075", "0.53678644", "0.5366866", "0.53413457", "0.5332131", "0.53231645", "0.5299922", "0.52984035", "0.52947456", "0.5281837", "0.52797896", "0.5249755", "0.52482074", "0.5246923", "0.52445346", "0.52186537", "0.5215776", "0.52141804", "0.5211624", "0.52057815", "0.51960903", "0.5188401", "0.5187024", "0.5182379", "0.51787305", "0.5178683", "0.5177778", "0.5163606", "0.5161778", "0.51555985", "0.51535594", "0.5147523", "0.5117088", "0.51162064", "0.5116091", "0.5113854", "0.51098967", "0.50931203", "0.50928766", "0.5081052", "0.50743586", "0.506874", "0.5042759", "0.5038567", "0.50312656", "0.5016252", "0.5011784", "0.50000715", "0.49875176", "0.49829307", "0.49803016", "0.49736834", "0.49577934", "0.49532768", "0.49522093", "0.49509174", "0.49487433", "0.49451667", "0.4942176", "0.49401855", "0.4938394", "0.49271637", "0.4925834", "0.4923432", "0.49168223", "0.49151912", "0.49111062", "0.48945475", "0.48937908", "0.48924634", "0.48855206", "0.48853126", "0.48788574", "0.48770928", "0.48754054", "0.48720288", "0.48685792", "0.48662797", "0.4864313", "0.48600063", "0.48583797", "0.48570284", "0.4855837", "0.48544535", "0.48531404" ]
0.0
-1
Sets parameters for rigs. rig_ids_str: comma-separated string with rig ids, e.g. "1,2,3,4". miner: Miner to set ("claymore", "claymorez", "ewbf", ...); leave it null if you do not want to change it. miner2: Second miner to set; leave it null if you do not want to change it, or "0" if you want to unset it. id_wal: ID of wallet; leave it null if you do not want to change it. id_oc: ID of OC profile; leave it null if you do not want to change it. Returns bool|mixed.
def multiRocket(self, rig_ids_str, miner, miner2, id_wal, id_oc):
    # rig_ids_str is required; bail out early when it is missing
    if rig_ids_str is None:
        self.log("Rigs ids required")
        exit()
    params = {
        'method': 'multiRocket',
        'rig_ids_str': rig_ids_str,
        'miner': miner,
        'miner2': miner2,
        'id_wal': id_wal,
        'id_oc': id_oc
    }
    result = self.request(params)
    if 'error' in result:
        return False
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_sids(self, sids):\n self._sids = sids\n # encode sids in RGB\n r = sids // 256**2\n rem = sids % 256**2 # remainder\n g = rem // 256\n b = rem % 256\n self.rgbsids = np.zeros((self.npoints, 3), dtype=np.uint8)\n self.rgbsids[:, 0] = r\n self.rgbsids[:, 1] = g\n self.rgbsids[:, 2] = b", "def set_sids(self, sids):\n self._sids = sids\n # encode sids in RGB\n r = sids // 256**2\n rem = sids % 256**2 # remainder\n g = rem // 256\n b = rem % 256\n self.rgbsids = np.zeros((self.npoints, 3), dtype=np.uint8)\n self.rgbsids[:, 0] = r\n self.rgbsids[:, 1] = g\n self.rgbsids[:, 2] = b", "def set_reads(self,bkid,pgs=0,vgs=None):\n# logging.debug('models.set_reads(%s,%s)'%(bkid,pg))\n myreads = self.get_reads()\n try:\n pg = int(pgs)\n except:\n pg = 0\n if bkid not in myreads:\n myreads[bkid] = [datetime.strftime(datetime.utcnow(),'%Y-%m-%d %H:%M:%S'),pg,vgs or '']\n else:\n if myreads[bkid][1] <= pg:\n myreads[bkid][1] = pg\n if vgs:\n if isinstance(vgs,list):\n myreads[bkid][2] = ','.join(filter(None,vgs))\n elif myreads[bkid][2]:\n mrs = myreads[bkid][2].split(',')\n if vgs in mrs: mrs.remove(vgs)\n mrs.append(vgs)\n myreads[bkid][2] = ','.join(filter(None,mrs))\n else:\n myreads[bkid][2] = str(vgs)\n self.put_reads(myreads)\n return True", "def set_params(self, params):\n for item in params:\n if len(item.split(\"-\")) == 5:\n self.params[item.split(\"-\")[-1]] = params[item]\n elif item.split(\"-\")[4] == \"BECKE88\":\n self.becke88.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"BECKE88_LR\":\n self.becke88_lr.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"BECKE88_LR_ADIABATIC\":\n self.becke88_lr_adiabatic.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"BECKE97\":\n self.becke97.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"BECKE_ROUSSEL\":\n self.becke_roussel.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"BEEF\":\n self.beef.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"CS1\":\n self.cs1.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"GV09\":\n self.gv09.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"HCTH\":\n self.hcth.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"KE_GGA\":\n self.ke_gga.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"KE_LIBXC\":\n self.ke_libxc.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"LDA_HOLE_T_C_LR\":\n self.lda_hole_t_c_lr.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"LIBXC\":\n self.libxc.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"LYP\":\n self.lyp.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"LYP_ADIABATIC\":\n self.lyp_adiabatic.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"OPTX\":\n self.optx.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"P86C\":\n self.p86c.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"PADE\":\n self.pade.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"PBE\":\n self.pbe.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"PBE_HOLE_T_C_LR\":\n self.pbe_hole_t_c_lr.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"PW92\":\n self.pw92.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"PZ81\":\n self.pz81.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"TF\":\n self.tf.set_params({item: params[item]})\n elif item.split(\"-\")[4] == 
\"TFW\":\n self.tfw.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"TPSS\":\n self.tpss.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"VWN\":\n self.vwn.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"XALPHA\":\n self.xalpha.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"XGGA\":\n self.xgga.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"XWPBE\":\n self.xwpbe.set_params({item: params[item]})\n else:\n pass", "def set_params(self, params):\n for item in params:\n if len(item.split(\"-\")) == 6:\n self.params[item.split(\"-\")[-1]] = params[item]\n elif item.split(\"-\")[5] == \"BECKE88\":\n self.becke88.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"BECKE88_LR\":\n self.becke88_lr.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"BECKE88_LR_ADIABATIC\":\n self.becke88_lr_adiabatic.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"BECKE97\":\n self.becke97.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"BECKE_ROUSSEL\":\n self.becke_roussel.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"BEEF\":\n self.beef.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"CS1\":\n self.cs1.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"GV09\":\n self.gv09.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"HCTH\":\n self.hcth.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"KE_GGA\":\n self.ke_gga.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"KE_LIBXC\":\n self.ke_libxc.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"LDA_HOLE_T_C_LR\":\n self.lda_hole_t_c_lr.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"LIBXC\":\n self.libxc.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"LYP\":\n self.lyp.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"LYP_ADIABATIC\":\n self.lyp_adiabatic.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"OPTX\":\n self.optx.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"P86C\":\n self.p86c.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"PADE\":\n self.pade.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"PBE\":\n self.pbe.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"PBE_HOLE_T_C_LR\":\n self.pbe_hole_t_c_lr.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"PW92\":\n self.pw92.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"PZ81\":\n self.pz81.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"TF\":\n self.tf.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"TFW\":\n self.tfw.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"TPSS\":\n self.tpss.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"VWN\":\n self.vwn.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"XALPHA\":\n self.xalpha.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"XGGA\":\n self.xgga.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"XWPBE\":\n self.xwpbe.set_params({item: params[item]})\n else:\n pass", "async def set_roster(\n self, jid: typing.Union[JID, str], roster_items: dict, **send_kwargs\n ) -> Iq:\n if self.granted_privileges[\"roster\"] not in (\"set\", \"both\"):\n log.error(\"The server did not grant us privileges to set rosters\")\n raise 
ValueError\n else:\n return await self._make_set_roster(jid, roster_items).send(**send_kwargs)", "def __set_receivers_id(self, receivers_id):\n if not isinstance(receivers_id, list):\n raise TypeError('Receivers id should be a list')\n if not all(isinstance(receiver_id, int) for receiver_id in receivers_id): # Check if all elements are int\n raise TypeError('All elements in the receivers id list should be integer')\n if any(receiver_id < 0 for receiver_id in receivers_id): # If any elements is negative\n raise ValueError('An element is negative, there can not be negative ids')\n self.__receivers_id = receivers_id", "def init_armor_set(self, armor_set):\n \n if armor_set:\n for armor_build in armor_set:\n armor = armor_build(self)\n self.armor_set.append(armor)\n armor.activate()", "def set_ships(self, dictionary):\n for key, value in dictionary.items():\n if value < 0:\n raise SettingsError(\"No negative ships\")\n self._parser.set(\"settings\", \"carriers\", str(dictionary[CARRIER]))\n self._parser.set(\"settings\", \"battleships\", str(dictionary[BATTLESHIP]))\n self._parser.set(\"settings\", \"cruisers\", str(dictionary[CRUISER]))\n self._parser.set(\"settings\", \"destroyers\", str(dictionary[DESTROYER]))\n self._save()", "def addtagDic(dic_i,tag,tag_str,setint=False):\n if( len( tag_str ) ):\n dic_i[tag] = []\n for id_s in tag_str.split():\n if( setint ):\n dic_i[tag].append(int(id_s))\n else:\n dic_i[tag].append(id_s)\n \n return dic_i", "def addtagDic(dic_i,tag,tag_str,setint=False):\n if( len( tag_str ) ):\n dic_i[tag] = []\n for id_s in tag_str.split():\n if( setint ):\n dic_i[tag].append(int(id_s))\n else:\n dic_i[tag].append(id_s)\n \n return dic_i", "def updateDict(self,strSet):\n\tself.createAdjList(strSet,\"remove\")", "def reviewer_id(self, reviewer_id: int):\n\n self._reviewer_id = reviewer_id", "def set_srid(self, srid: ir.IntegerValue) -> GeoSpatialValue:\n return ops.GeoSetSRID(self, srid=srid).to_expr()", "def setModemInitString(self, initString, unitCode=0):\n resp = self.XAPCommand('MINIT', initString, unitCode=unitCode)\n return resp", "def krsedg(self, krsedg):\n if (self.local_vars_configuration.client_side_validation and\n krsedg is not None and len(krsedg) > 32):\n raise ValueError(\"Invalid value for `krsedg`, length must be less than or equal to `32`\") # noqa: E501\n\n self._krsedg = krsedg", "def replace_ids_submission(ids):\n \n item = np.zeros((len(ids), ), dtype = 'int')\n user = np.zeros((len(ids), ), dtype = 'int')\n for i in range(len(ids)):\n row, col = ids[i].split(\"_\")\n item[i] = int(row.replace(\"r\", \"\"))\n user[i] = int(col.replace(\"c\", \"\"))\n \n return item, user", "async def set_chat_sticker_set(self, chat_id: typing.Union[base.Integer, base.String],\n sticker_set_name: base.String) -> base.Boolean:\n payload = generate_payload(**locals())\n result = await self.request(api.Methods.SET_CHAT_STICKER_SET, payload)\n\n return result", "def make_chromarms(\n chromsizes,\n midpoints,\n cols_chroms=(\"chrom\", \"length\"),\n cols_mids=(\"chrom\", \"mid\"),\n suffixes=(\"_p\", \"_q\"),\n):\n columns_to_drop = [\"index\", \"sub_index_\"]\n if len(cols_chroms) == 2:\n ck1, sk1 = cols_chroms\n elif len(cols_chroms) == 3:\n ck1, sk1, ek1 = cols_chroms\n\n if isinstance(chromsizes, pd.Series):\n df_chroms = (\n pd.DataFrame(chromsizes).reset_index().rename(columns={\"index\": ck1})\n )\n elif isinstance(chromsizes, pd.DataFrame):\n df_chroms = chromsizes.copy()\n else:\n raise ValueError(\"unknown input type for chromsizes\")\n\n if len(cols_chroms) 
== 2:\n _verify_columns(df_chroms, [ck1, sk1])\n columns_to_drop += [sk1]\n df_chroms[\"end\"] = df_chroms[sk1].values\n df_chroms[\"start\"] = 0\n sk1, ek1 = \"start\", \"end\"\n elif len(cols_chroms) == 3:\n ck1, sk1, ek1 = cols_chroms\n _verify_columns(df_chroms, [ck1, sk1, ek1], unique_cols=True)\n if any(df_chroms[sk1].values != 0):\n raise ValueError(\"all values in starts column must be zero\")\n else:\n raise ValueError(\"invalid number of cols_chroms\")\n\n ck2, sk2 = cols_mids\n if isinstance(midpoints, dict):\n df_mids = pd.DataFrame.from_dict(midpoints, orient=\"index\", columns=[sk2])\n df_mids.reset_index(inplace=True)\n df_mids.rename(columns={\"index\": ck2}, inplace=True)\n elif isinstance(midpoints, pd.DataFrame):\n df_mids = midpoints.copy()\n else:\n raise ValueError(\"unknown input type for midpoints\")\n _verify_columns(df_mids, [ck2, sk2])\n df_mids[\"start\"] = df_mids[sk2]\n df_mids[\"end\"] = df_mids[sk2]\n\n df_chromarms = ops.subtract(\n df_chroms,\n df_mids,\n cols1=(ck1, sk1, ek1),\n cols2=(ck2, \"start\", \"end\"),\n return_index=True,\n )\n if df_chromarms[\"sub_index_\"].max() > 1:\n raise ValueError(\n \"chromosome split into more than two arms, double-check midpoints\"\n )\n df_chromarms[\"name\"] = df_chromarms[ck1] + [\n suffixes[i] for i in df_chromarms[\"sub_index_\"].values\n ]\n # df_chromarms.drop(columns=columns_to_drop, inplace=True)\n return df_chromarms[[ck1, sk1, ek1, \"name\"]]", "def set_positions(self, x, y, station_diameter=40,\n hpol_phased_antennas=10, vpol_phased_antennas=10,\n hpol_phased_separation=1, vpol_phased_separation=1,\n hpol_phased_lowest=-49, vpol_phased_lowest=-69,\n outrigger_strings_per_station=3,\n outrigger_string_type=ARAString,\n **outrigger_string_kwargs):\n # Change defaults for outrigger strings\n if \"antennas_per_string\" not in outrigger_string_kwargs:\n outrigger_string_kwargs[\"antennas_per_string\"] = 8\n if \"antenna_separation\" not in outrigger_string_kwargs:\n n = outrigger_string_kwargs[\"antennas_per_string\"]\n sep = [1, 29] * int(n/2)\n outrigger_string_kwargs[\"antenna_separation\"] = sep[:n-1]\n if \"lowest_antenna\" not in outrigger_string_kwargs:\n outrigger_string_kwargs[\"lowest_antenna\"] = -100\n\n self.subsets.append(\n PhasedArrayString(x, y, antennas_per_string=hpol_phased_antennas,\n antenna_separation=hpol_phased_separation,\n lowest_antenna=hpol_phased_lowest,\n antenna_type=HpolAntenna)\n )\n self.subsets.append(\n PhasedArrayString(x, y, antennas_per_string=vpol_phased_antennas,\n antenna_separation=vpol_phased_separation,\n lowest_antenna=vpol_phased_lowest,\n antenna_type=VpolAntenna)\n )\n\n r = station_diameter/2\n for i in range(outrigger_strings_per_station):\n angle = 2*np.pi * i/outrigger_strings_per_station\n x_str = x + r*np.cos(angle)\n y_str = y + r*np.sin(angle)\n self.subsets.append(\n outrigger_string_type(x_str, y_str, **outrigger_string_kwargs)\n )", "def parse_run_range(self, run_range_str):\r\n\r\n assert isinstance(run_range_str, str)\r\n if not \"-\" in run_range_str:\r\n return None\r\n\r\n # split <>-<>\r\n (str_min, str_max) = run_range_str.split(\"-\")\r\n run_min_set = False\r\n run_max_set = False\r\n\r\n # parse run min\r\n try:\r\n run_min = int(str_min)\r\n run_min_set = True\r\n except ValueError:\r\n run_min = 0\r\n\r\n # parse run max\r\n try:\r\n run_max = int(str_max)\r\n run_max_set = True\r\n except ValueError:\r\n run_max = INFINITE_RUN\r\n\r\n return run_min, run_max, run_min_set, run_max_set", "def geneset(self, value: Union[str, int, Geneset, 
List[str]]):\n # Geneset can be set only once, prevent modifications\n if self._geneset is not None:\n raise ValueError(\"It is not allowed to change geneset value.\")\n\n if value is None:\n return\n\n # If id / slug of a geneset is given, get it from the Resolwe server\n if isinstance(value, (int, str)):\n gs = self.resolwe.geneset.get(value)\n value = gs.genes\n elif isinstance(value, Geneset):\n value = value.genes\n\n if isinstance(value, (list, set, tuple, pd.Series)):\n self._geneset = set(value)\n else:\n raise ValueError(f'Unsupported type of \"geneset\" input: {value}.')", "def set_RQ(self): \n # Convenience abbreviations.\n ion_atms = self.ion_atms # Atoms to average dictionary\n ion_res = self.ion_res # Ionizable residues\n RQ = self.RQ # list of q_i coordinates\n for res_id in self.res_ids:\n if res_id[0] in ion_res:\n # Atoms to average. Omitting the residue id at the 0 position in the\n # 'res'-list, therefore 'res_id[3:]'.\n # 'atm.split()[0]' returns the atom type.\n av_atms = []\n for atm in res_id[3:]:\n if atm.split()[0] in ion_atms[res_id[0]]:\n av_atms.append(\" \".join(res_id[:3]) + \" \" + atm.strip())\n RQ.append(av_atms) \n self.RQ = RQ", "def srs_id(self, srs_id):\n self.logger.debug(\"In 'srs_id' setter.\")\n\n if len(srs_id) < 3:\n raise Exception(\"SRS ID is too short, must be more than 3 characters.\")\n\n self._srs_id = srs_id", "def assign_gids(self, int[::1] gids):\n self.mdb.get().assign_gids(<int> gids.size, <const int *> &gids[0])", "def setReactionId(self, *args):\n return _libsbml.ReactionGlyph_setReactionId(self, *args)", "async def setReaders(self, eventID: str, readers: Iterable[str]) -> None:", "def _format_set_iss(self, format_set_iss=None):\n ## Format iss\n if format_set_iss is None or format_set_iss == 'general':\n self._set_iss = self._general_set_iss\n elif format_set_iss == 'null':\n self._set_iss = self._null_set_iss\n elif format_set_iss == 'int':\n self._set_iss = self._int_set_iss\n elif format_set_iss == 'list':\n self._set_iss = self._list_set_iss", "def _params(self, qs):\n return [str_id for str_id in qs.split(',')]", "def get_camp_ids_containing_str(marketer_id, string):\n all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0])\n return [x.get(\"id\") for x in all_campaigns if string in x[\"name\"]]", "def set_run(self, run_id: str):\n self.run_id = run_id", "def fromString(cls, string):\n # From SAM specification v1.5, slightly adapted for single-token parsing\n pattern = r\"^[0-9]+[MIDNSHPX=]\" \n string = string.strip()\n if string == '*':\n return CIGAR.fromList(['*'])\n parsed = []\n s = string\n # Parse string token (e.g. 14M) by token, re.findall is not enough,\n # because non-matching subsequences between (e.g. \"14Mblabla3D4M\") would\n # go unnoticed! 
Also it would be good to abort as early as possible if\n # an invalid string is found to avoid parsing possibly very long strings\n while s != '':\n r = re.match(pattern, s)\n if not r:\n raise ValueError('Invalid CIGAR string: \"'+string+'\"')\n g = r.group(0)\n parsed.append(g)\n s = s[len(g):]\n \n parsed = [(int(p[:-1]), p[-1:]) for p in parsed]\n\n return CIGAR.fromList(parsed)", "def validate_minibatch_size_str(minibatch_size_str):\n if not isinstance(minibatch_size_str, str):\n return False\n a = minibatch_size_str.split(\"/\")\n assert len(a) != 0 # would be code error\n\n for elem in a:\n b = elem.split('=')\n # We expect b to have length 2 in the normal case.\n if len(b) != 2:\n # one-element 'b' is OK if len(a) is 1 (so there is only\n # one choice)... this would mean somebody just gave \"25\"\n # or something like that for the minibatch size.\n if len(a) == 1 and len(b) == 1:\n return validate_range_str(elem)\n else:\n return False\n # check that the thing before the '=' sign is a positive integer\n try:\n if int(b[0]) <= 0:\n return False\n except:\n return False # not an integer at all.\n\n if not validate_range_str(b[1]):\n return False\n return True", "def rc_response_sets_id(self, rc_response_sets_id):\n\n self._rc_response_sets_id = rc_response_sets_id", "def choose_variant_annotation(csq_string, variant_annotation_rank_dict, gene_ix, conseq_ix):\n minrank = len(variant_annotation_rank_dict)\n gene_id = 'NA'\n conseq = 'NA'\n\n for tr in csq_string.split(','):\n annots = tr.split('|')\n for v in annots[conseq_ix].split('&'):\n if v in variant_annotation_rank_dict:\n r = variant_annotation_rank_dict[v]\n if r<minrank:\n minrank = r\n gene_id = annots[gene_ix]\n conseq = v\n return gene_id, conseq", "def _SetRunParameters(self, params: Mapping[str, Any]) -> None:\n # Ideally YCSB should be refactored to include a function that just takes\n # commands for a run, but that will be a large refactor.\n FLAGS['ycsb_run_parameters'].unparse()\n FLAGS['ycsb_run_parameters'].parse([f'{k}={v}' for k, v in params.items()])", "def parseSet(cmds):\n if len(cmds) != 0:\n first = str.strip(cmds[0])\n if first[0] == 'w':\n pass\n elif first[0] == 'r':\n pass\n else:\n parseExpr(first)\n parseSet(cmds[1:])", "def alterar_arqid(alterar_etrade: int, alterar_gourmet: int):\n try:\n row = get_settings()\n\n if row is not None:\n check_this_file = open(fix_path(row[1]) +\n r'\\PDV\\bin\\Debug\\ArqID.txt', 'r').readline()\n server = row[2]\n station = row[3]\n else:\n return \"Os caminhos não foram encontrados no banco de dados\"\n\n if alterar_etrade == 1:\n SYSTEMS.append(\"ETrade\")\n if alterar_gourmet == 1:\n SYSTEMS.append(\"Gourmet\")\n\n for app in SYSTEMS:\n arqid = fix_path(row[1]) + r\"%s\\bin\\Debug\\ArqID.txt\" % (app)\n if server.rsplit('\\\\', 1)[-1] in check_this_file:\n change_line_on_file(arqid, station)\n result = station\n else:\n change_line_on_file(arqid, server)\n result = server\n\n if __name__ == '__main__':\n ctypes.windll.user32.MessageBoxW(0, result, \"change arqid\", 0)\n return result\n\n return result\n except:\n return 'Verifique o caminho do repositório!'", "def string_to_param(self,string):\n\n if (string.startswith(\"log_\")): return math.log10(self.string_to_param(string[4:]))\n if (string.startswith(\"ln_\")): return math.log(self.string_to_param(string[3:]))\n if (string.startswith(\"exp_\")): return math.exp(self.string_to_param(string[4:]))\n if (string == \"Mass\"): return self.glb[imass]/constants.solar_mass\n if (string == \"Radius\"): return 
self.glb[iradius]/constants.solar_radius\n if (string == \"Luminosity\"): return self.glb[iluminosity]/constants.solar_luminosity\n if (string == \"Z\"): return self.glb[iz0]\n if (string == \"Y\"): return 1.0-self.glb[iz0]-self.glb[ix0]\n if (string == \"X\"): return self.glb[ix0]\n if (string == \"Ys\"): return 1.0-self.glb[user_params_index[\"Zs\"]]-self.glb[user_params_index[\"Xs\"]]\n if (string == \"zsx_s\"): return self.zsx_s\n if (string == \"zsx_0\"): return self.zsx_0\n if (string == \"Fe_H\"): return self.FeH\n if (string == \"M_H\"): return self.MH\n if (string == \"Age\"): return self.glb[iage]\n if (string == \"Teff\"): return self.glb[itemperature]\n if (string == \"Dnu\"): return self.find_large_separation()*self.glb[ifreq_ref]\n if (string == \"numax\"): return self.numax\n if (string == \"Rho\"): return 3.0*self.glb[imass]/(4.0*math.pi*self.glb[iradius]**3)\n if (string == \"g\"): return constants.G*self.glb[imass]/self.glb[iradius]**2\n if (string == \"beta_Sonoi2015\"): return self.beta_Sonoi2015\n if (string == \"b_Kjeldsen2008\"): return self.b_Kjeldsen2008\n\n try:\n return self.glb[user_params_index[string]]\n except KeyError:\n sys.exit(\"ERROR: unrecognised model quantity: \"+string)", "def Illumina_ID(rid):\n index = rid.find(\":\") # finds the first occurance of ':'\n new_id = rid[:index] + \":1:12345\" + rid[index:]\n new_id_split = re.split(\"#|/\", new_id)\n new_id = new_id_split[0] + \" \" + new_id_split[2] + \":Y:0:\" + new_id_split[1]\n return new_id", "def build_set(self, s):\n comma = self.art_type([self.string_type(', ')], baseline=0)\n repr_elems = self.concatenate(s, comma)\n return self.build_container(\n repr_elems, self.left_curly_brace, self.right_curly_brace)", "def tag_ids(self, tag_ids):\n\n self._tag_ids = tag_ids", "def trace_id_set(trace_id: tuple[str, str]) -> None:\n trace_id_cv.set(trace_id)", "def setInlineCuts(self, cutsstring):\n self._checkArgs( { 'cutsstring' : types.StringTypes } )\n\n self.inlineCuts = \";\".join([cut.strip() for cut in cutsstring.strip().split(\"\\n\")])", "def __init__(self, *args):\n _snap.TStrStrIntIntQu_swiginit(self, _snap.new_TStrStrIntIntQu(*args))", "def set_positions(self, x, y, strings_per_station=4,\n station_diameter=20, string_type=ARAString,\n **string_kwargs):\n r = station_diameter/2\n for i in range(strings_per_station):\n angle = 2*np.pi * i/strings_per_station\n x_str = x + r*np.cos(angle)\n y_str = y + r*np.sin(angle)\n self.subsets.append(\n string_type(x_str, y_str, **string_kwargs)\n )", "def nr_s_cell_rsrq(self, nr_s_cell_rsrq):\n\n self._nr_s_cell_rsrq = nr_s_cell_rsrq", "def part_ids(self, part_ids):\n\n self._part_ids = part_ids", "def init_seed_S(minimap_filename, max_missed_5_len, max_missed_5_ratio, max_missed_3_len, max_missed_3_ratio):\n pCS = preClusterSet()\n\n for r in MiniReader(minimap_filename):\n if r.qID == r.sID or r.strand == '-': continue # ignore self and opp strand hits\n if r.qID > r.sID: continue # ignore redundant hits since this is all-for-all\n stat = r.characterize(max_missed_5_len, max_missed_5_ratio, max_missed_3_len, max_missed_3_ratio)\n # stat could be: match, q_contained, s_contained, partial\n # at this first stage, we only care about match\n if stat == 'match':\n pCS.add_seqid_match(r.qID, r.sID)\n elif stat == 'q_contained':\n pCS.add_tucked_match(r.qID, r.sID)\n elif stat == 's_contained':\n pCS.add_tucked_match(r.sID, r.qID)\n else:\n pass # do nothing if it's partial", "def setId(self, *args):\n return _libsbml.Reaction_setId(self, *args)", "def 
__set_nucleotide_mutation(self, hgvs_str):\n self.__set_substitution_status(hgvs_str)\n self.__set_indel_status(hgvs_str)", "def create(data):\n \n minifig = Minifig(\n minifig_id = data.get('set_num', None),\n name = data.get('set_name', None),\n pieces = data.get('num_parts', None),\n img_url = data.get('set_img_url', None),\n count = data.get('quantity', None))\n \n return minifig", "def resource_ids(self, resource_ids):\n\n self._resource_ids = resource_ids", "def __init__(self, shine_dalgarno_id: str, left_right: Tuple[int, int], gene_id: str):\n super().__init__('shine_dalgarno', left_right=left_right)\n self.id = shine_dalgarno_id\n self.gene_id = gene_id\n\n # populated by link_gene only\n self.gene = None", "def set_moderator(self, moderators):\n self.set_group(self._gp_moderator_name, moderators)", "def fix_samp_id(mystring):\n if isinstance(mystring,int) or isinstance(mystring,float):\n mystring = str(mystring)\n if mystring.startswith('TCGA'):\n return mystring[:12]\n else:\n return mystring", "def _format_gtid_set(self, gtid):\n sid, transaction_id = gtid.split(\":\")\n gtid_set = \"{sid}:1-{next_transaction_id}\".format(\n sid=sid,\n next_transaction_id=int(transaction_id)\n )\n return gtid_set", "def change(self, ids, **kwargs):\n args = {}\n for key, value in kwargs.iteritems():\n argument = make_rpc_name(key)\n (arg, val) = argument_value_convert('torrent-set'\n , argument, value, self.rpc_version)\n args[arg] = val\n\n if len(args) > 0:\n self._request('torrent-set', args, ids, True)\n else:\n ValueError(\"No arguments to set\")", "def sanitize_record_id_set(record_id_set):\n prev_len = len(record_id_set)\n record_id_set = set(\n READ_INDEX_DIR_RE.sub(\"\", DUP_COLON_RE.sub(r\":\", s)) for s in record_id_set\n )\n\n if prev_len != len(record_id_set):\n ## Uh oh, this hack broke something\n raise Exception(\n \"The list of unpaired reads has name collisions \"\n \"if multiple colons are ignored. Your reads are \"\n \"using a naming convetion that the author of this \"\n \"software (illuminPrep) didn't anticipate. 
My \"\n \"apologies\"\n )\n\n return record_id_set", "def _parse_run_identifiers(run):\n run_identifiers = {'run_id': None, 'backend': None, 'results_dir': None, 'file_name': None}\n if not TypeCheck.is_collection(run):\n run = [run, ] # only run_id\n else:\n run = list(run) # if iterable passed, make it a list explicitly\n run_identifiers['run_id'] = run.pop(0) # pop the first element, which needs to be run_id\n if not len(run):\n pass\n elif len(run) == 1: # only one more element is in the run iterable:\n if run[0].lower() in Config.accepted_backends: # it's backend\n run_identifiers['backend'] = run[0]\n elif os.path.isdir(run[0]): # its results_dir\n run_identifiers['results_dir'] = run[0]\n else: # it needs to be solution_file_name\n run_identifiers['file_name'] = run[0]\n elif len(run) == 2: # two more parameters are in the run iterable\n # the last one needs to be solution_file_name, since backend and res_dir are mutually exclusive\n run_identifiers['file_name'] = run[-1]\n if run[0].lower() in Config.accepted_backends: # the second one is backend\n run_identifiers['backend'] = run[0]\n else: # the second one is results_dir\n run_identifiers['results_dir'] = run[0]\n else:\n raise ResultsAttributeError('Invalid run identifiers!')\n\n return run_identifiers", "def update_strandinfo(self):\n params = ['x','y','rho','theta','spiral','inward','outward']\n infos = {'min':np.min,\n 'max':np.max,\n 'count':lambda x:len(set(x))}\n\n self.strands = {}\n\n for f in ['pwm','channel']:\n self.strands[f] = [ s[f] for s in self.strands_config]\n\n for f in params:\n if f in self.strands_config[0]:\n self.strands[f] = np.array([ s[f] for s in self.strands_config],dtype=np.int16)\n\n for f in ['intensity','last_intensity']:\n self.strands[f] = np.zeros_like(self.strands['x'],dtype=np.int16)\n\n self.strandinfo = { param: { info : None for info in infos} for param in params }\n for p in params:\n for ik,iv in infos.items():\n self.strandinfo[p][ik] = iv(self.strands[p])\n\n print('self.strands:', self.strands)\n print('strandinfo:',self.strandinfo)", "def update_query_settings(s, nb_atts, delimiter='_', **kwargs):\n\n param_map = _compile_param_map(prefix='qry', delimiter=delimiter, **kwargs)\n relevant_kwargs = {v: kwargs[k] for k, v in param_map.items()}\n\n if 'codes' in relevant_kwargs:\n # Check codes and if they do not comply replace by default\n codes = relevant_kwargs['codes']\n\n msg = \"\"\"\n codes are: {}\\n\n type of codes is: {}\\n\n \"\"\".format(codes, type(codes))\n debug_print(msg, V=VERBOSITY)\n\n if _verify_decent_query_codes(codes, nb_atts):\n # Our decency check ensures this conversion will work\n s['codes'] = np.array(codes)\n else:\n msg = \"\"\"\n Provided query codes:\\t{}\\n\n Failed decency tests and are therefore replaced by default codes.\n \"\"\".format(codes)\n warnings.warn(msg)\n s['codes'] = _generate_default_query_code(nb_atts)\n\n s['q_desc'], s['q_targ'], s['q_miss'] = codes_to_query(s['codes'])\n elif 'code' in relevant_kwargs:\n # Wrap single code in extra array for consistency\n msg = \"\"\"\n In file: {}\\n\n I am reading a single query code, i.e.: {}\\n\n Query code is of type: {}\\n\n \"\"\".format(__file__, relevant_kwargs['code'], type(relevant_kwargs['code']))\n debug_print(msg, V=VERBOSITY)\n\n codes = np.atleast_2d(relevant_kwargs['code'])\n update_query_settings(s, nb_atts, qry_codes=codes) # N.B.: Do NOT pass the delimiter here!\n else:\n # Nothing provided in kwargs, we check what is already present.\n codes = s.get('codes', None)\n 
update_query_settings(s, nb_atts, qry_codes=codes) # N.B.: Do NOT pass the delimiter here!\n\n return s", "def set(ribo, name, alignment, counts, sep, format, force):\n\n set_rnaseq_wrapper(ribo_file = ribo, \n name = name, \n rnaseq_file = alignment,\n rnaseq_counts = counts,\n sep = sep,\n format = format,\n force = force)", "def parse_queue_str(cls, queue_str, keys=0):\n\n queue_dict = {}\n queue_rows = queue_str.split('\\n')\n\n if isinstance(keys, int):\n del_index = keys\n keys = [k.strip(' ')\n for k in queue_rows[keys].strip(' ').split(' ')\n if k != '']\n del queue_rows[del_index]\n\n for row in queue_rows:\n job = [k.strip(' ') for k in row.strip(' ').split(' ') if k != '']\n job_id = int(job[keys.index(cls.QCOL_ID)])\n queue_dict[job_id] = {k: job[i] for i, k in enumerate(keys)}\n\n return queue_dict", "def process_command_line(argv):\n parser = argparse.ArgumentParser(description='Run SMRS algorithm.')\n #Positional args\n parser.add_argument('files', metavar='molec1.ext',\n nargs='+', help='files to be processed')\n #Optional true/false args\n parser.add_argument('-d', '--dihed',\n help='Use dihedral angles as coordinates',\n action='store_true')\n parser.add_argument('-n', '--nonH',\n help='Do not include hydrogen in coords',\n action='store_true')\n parser.add_argument('-e', '--energy', help='Append energy at the end of \\\n molecules vector', action='store_true')\n parser.add_argument('--delCoordCSV', help='Delete CSV file with molecule \\\n coordinates', action='store_true')\n parser.add_argument('--delCoefCSV', help='Delete C matrix CSV file',\n action='store_true')\n parser.add_argument('--folder', help='Name of folder, where representative\\\n molecules will be saved', action='store_true')\n parser.add_argument('--sdf', help='If you want to calculate pybel\\\n fingerprints', action='store_true')\n #Optional args with value\n parser.add_argument('--alpha', help='Specify lambda paramter',\n type=int, metavar='A')\n parser.add_argument('--division', help='Specify type and parametrs\\\n of molecule division into groups. 
Parametrs should\\\n be separated by comma without spaces', metavar='name')\n parser.add_argument('--pruneStart', help='Specify minimum RMSD by which \\\n molecules should be separated in starting set',\n type=float, metavar='RMSD')\n parser.add_argument('--pruneFinish', help='Specify minimum RMSD by which \\\n representive molecules should be separated',\n type=float, metavar='RMSD')\n parser.add_argument('--format', help='Babel type of input molecules')\n return parser.parse_args(argv)", "def __init__(self, riboswitch_id: str, left_right: Tuple[int, int], name: str, gene_id: str):\n super().__init__('riboswitch', left_right=left_right, name=name)\n self.id = riboswitch_id\n self.gene_id = gene_id\n # set after link_gene is run\n self.gene = None", "def main_ranker(self,pokerhand_string):\n hand_matrix=self._string2matrix(pokerhand_string)\n \n numbers_frequency=hand_matrix.sum(axis=1)\n suits_frequency=hand_matrix.sum(axis=0)\n\n key='subspace'\n\n if self._check_S(numbers_frequency):\n key=key+'HS'\n\n if self._check_RS(suits_frequency):\n key=key+'RS'\n \n ranker={'subspace':self._sub_ranker_RN(numbers_frequency),'subspaceRS':5,'subspaceHS':6,'subspaceHSRS':self._sub_ranker_top(numbers_frequency)}\n\n return ranker[key]", "def _clean_politician_set(self, cleaned_data, geoname_ids):\n\n field_name = 'politician_set'\n charge_ids = [int(elem) for elem in cleaned_data[field_name].strip('|').split('|')]\n charge_ids_copy = list(charge_ids)\n\n if charge_ids:\n\n politician_ids = []\n\n for cityrep_id in geoname_ids:\n if not len(charge_ids_copy):\n break\n found_ids, pol_ids = self.get_politicians_from_cityrep(\n charge_ids_copy,\n cityrep_id\n )\n for pol_id in pol_ids:\n politician_ids.append(pol_id)\n #print(\"\\ncityrep_id: %s\\ncharge_ids_copy: %s\\nfound_ids: %s\\n\" % (cityrep_id, charge_ids_copy, found_ids))\n for charge_id in found_ids:\n charge_ids_copy.remove(charge_id)\n \n if len(charge_ids_copy):\n raise exceptions.ValidationError(u\"Non tutti i politici sono stati recuperati. 
Sono rimasti fuori i politici con id: %s\" % charge_ids_copy)\n\n lookup = get_lookup(MAP_FIELD_NAME_TO_CHANNEL[field_name])\n values = lookup.get_objects(politician_ids)\n\n else:\n values = []\n\n return values", "def __init__(self, targetString, w, k, t):\n \n self.targetString = targetString\n self.w = w\n self.k = k\n self.t = t # If a minmer occurs more than t times then its entry is removed from the index\n # This is a heuristic to remove repetitive minmers that would create many spurious alignments between\n # repeats\n \n # Hash of minmers to query locations, stored as a map whose keys\n # are minmers and whose values are lists of the start indexes of\n # occurrences of the corresponding minmer in the targetString, \n # sorted in ascending order of index in the targetString.\n #\n # For example if k = 2 and w = 4 and targetString = \"GATTACATTT\"\n #\n # GATTACATTT\n # GATT (AT)\n # ATTA (AT)\n # TTAC (AC)\n # TACA (AC)\n # ACAT (AC)\n # CATT (AT)\n # ATTT (AT)\n #\n # then self.minimizerMap = { \"AT\":(1,6), \"AC\":(4,) }\n self.minimizerMap = {}\n # Code to complete to build index - you are free to define additional functions\n self.size = len(targetString)\n for nuc in range(self.size - self.w + 1):\n window = self.targetString[nuc: nuc + self.w]\n minimer = None # some holder for the minimizer\n\n # implement the window alogrithm from the reading.\n for k_position in range(self.w - k + 1):\n kmer = window[k_position: k_position + k]\n if minimer is None or kmer < minimer[0]: \n minimer = (kmer, nuc + k_position)\n self.minimizerMap.setdefault(minimer[0], set()).add(minimer[1])\n if len(self.minimizerMap[minimer[0]]) > t: del self.minimizerMap[minimer[0]]", "def set_lic_id(self, doc, lic_id):\n # FIXME: this state does not make sense\n self.reset_extr_lics()\n if validations.validate_extracted_lic_id(lic_id):\n doc.add_extr_lic(document.ExtractedLicense(lic_id))\n return True\n else:\n raise SPDXValueError('ExtractedLicense::id')", "def initializationRequest(self, zoomLevelAndBBoxesString, opt=[]):\n split = zoomLevelAndBBoxesString.split(\",\")\n print \"SERVER: initializationRequest() called with argument\", split\n levelsAndBBoxes = []\n for i in range(len(split) / 5):\n levelsAndBBoxes.append((int(split[5*i]),\n float(split[5*i+1]),\n float(split[5*i+2]),\n float(split[5*i+3]),\n float(split[5*i+4])))\n gHeatmapDB.initialize_all_rasters(levelsAndBBoxes)\n return \"initialization_callback()\"", "def add_remixed_circuit_id(self, original_id, remixed_id):\n key = ':'.join(\n [CIRCUIT_RMX_CTS_1, \n str(original_id), \n CIRCUIT_RMX_CTS_2]\n )\n self.RS.sadd(key, remixed_id)", "def encoding_ids(self, encoding_ids):\n # type: (list) -> None\n\n if encoding_ids is not None:\n if not isinstance(encoding_ids, list):\n raise TypeError(\"Invalid type for `encoding_ids`, type has to be `list[string_types]`\")\n\n self._encoding_ids = encoding_ids", "def __allowed_values_correct_string(self):\n strTestName = 'Values of a string (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'string')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramAllowed('parameter1', ['Allowed string #1', 'Allowed string #2'])\n\n RxCSObject.parameter1 = 'Allowed string #2'\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def irmsd(receptor, ligref, ligprobe, receptorprobe=None, reducedmodel=False):\n cutoff = 10.0\n if reducedmodel:\n cutoff = 2.0 * 7.0 # twice the threshold for residue-residue contacts\n\n recBB = receptor.Backbone().CreateRigid() # 
receptor backbone\n ligrefBB = ligref.Backbone().CreateRigid()\n\n # creating list of residues in interaction:\n recResidues = {}\n ligResidues = {}\n\n receptor = AttractRigidbody(receptor)\n ligref = AttractRigidbody(ligref)\n pairlist = AttractPairList(receptor, ligref, cutoff) # pairlist created on protein backbone + side-chains\n for i in range(len(pairlist)):\n atompair = pairlist[i]\n ligindex = atompair.atlig\n recindex = atompair.atrec\n\n atom = ligref.CopyAtom(ligindex)\n ligResidues[atom.residId] = 1\n\n atom = receptor.CopyAtom(recindex)\n recResidues[atom.residId] = 1\n\n ligResidues = sorted(ligResidues.keys()) # get a list of the ligand's residues in interaction\n recResidues = sorted(recResidues.keys())\n\n ligrefBBInterface = selectListOfResidues(ligrefBB, ligResidues) # interface backbone residues of reference ligand\n ligBBInterface = ligprobe.Backbone() & selectListOfResidues(ligprobe, ligResidues) # interface bb residues of docked ligand\n receptorBBInterface = selectListOfResidues(recBB, recResidues)\n\n if options.superpose:\n ligrefpdb = ligrefBBInterface.CreateRigid()\n ligdockpdb = ligBBInterface.CreateRigid()\n recrefpdb = receptorBBInterface.CreateRigid()\n ref = Rigidbody(ligrefpdb + recrefpdb)\n pred = Rigidbody(ligdockpdb + recrefpdb)\n super = superpose(ref, pred, 0)\n mat = super.matrix\n pred.ApplyMatrix(mat)\n assert len(ligrefBBInterface) == len(ligBBInterface)\n return Rmsd(pred, ref)\n else:\n assert len(ligrefBBInterface) == len(ligBBInterface)\n return Rmsd(ligrefBBInterface.CreateRigid(), ligBBInterface.CreateRigid())", "def set_id(self, value: str) -> None:\n if not isinstance(value, str):\n raise TypeError('id must be a string, not {0}'.format(type(value)))\n self._id = value", "def ReplaceGWsforRTinGRSAZ(gwo, gwi, grsaz, rtid):\n M = grsaz\n for r in M:\n if r[0] == gwo and r[1] == rtid:\n r[0] = gwi\n return M", "def __init__(__self__, *,\n storage_space_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if storage_space_ids is not None:\n pulumi.set(__self__, \"storage_space_ids\", storage_space_ids)", "def setSnrMin(_snrMin):\n \n simuConfig[\"SNR.MIN\"] = _snrMin", "def get_scene_rigs():\n\n if _MANAGER_RIG_ATTRIBUTE:\n try:\n rigs = [x.split(\".\")[0] for x in cmds.ls(\n \"*.{}\".format(_MANAGER_RIG_ATTRIBUTE), recursive=True)]\n except RuntimeError:\n raise ValueError(\"Invalid attribute key: {} - is not a valid \"\n \"attribute key to set on the \"\n \"MGEAR_CACHE_MANAGER_RIG_ATTRIBUTE variable\"\n .format(_MANAGER_RIG_ATTRIBUTE))\n else:\n rigs = [x.split(\".\")[0] for x in cmds.ls(\"*.is_rig\", recursive=True)]\n\n # we query the gpu caches node rig_link custom attribute in the scene\n # in order to keep the returned value accurate.\n # If we have a scene in which a rig has already been cached and the\n # reference unloaded we can't find the rig node anymore on the scene so\n # we use the custom attribute added by the load_gpu_cache method to query\n # caches been created by the cache manager.\n [rigs.append(cmds.getAttr(\"{}.rig_link\".format(x)))\n for x in cmds.ls(type=\"gpuCache\")\n if cmds.objExists(\"{}.rig_link\".format(x))\n and cmds.getAttr(\"{}.rig_link\".format(x)) not in rigs]\n\n return rigs or None", "def build_rig(self):\n\n # create rig part top nodes\n self.create_part_master()\n\n # Get all the relevant part info\n prefix = self.prefix\n options = self.options\n anim_ctrls = self.anim_ctrls\n bind_jnts = self.bind_joints\n hooks = self.hooks\n ctrl_grps = self.ctrl_grps\n jnt_grps = self.jnt_grps\n 
noxform_grp = self.noxform_grp\n world_scale_attr = self.hooks[0] + '.worldScale'\n\n\n setupNeck.setup_neck()\n setupNeck.setup_head()\n autoRig.apply_shapes()\n\n\n #\n # mc.parent ('bottomNeckSkin_Mid_jnt', 'topNeckSkin_Mid_jnt', jnt_grps[0])\n # mc.parent ('neck_rig', noxform_grp)\n # mc.parent ('neck_ctrls', ctrl_grps[0])\n # mc.parent ('rotateReader_grp', jnt_grps[0])\n #\n # mc.parent ('drivenArm_chest_Mid_bind', jnt_grps[0])\n #\n # scales = [u'neck01_Mid_bind', u'neck02_Mid_bind', u'neck03_Mid_bind', u'neckEnd_Mid_jnt',u'headTop_Mid_bind', u'headRear_Mid_bind', u'headSide_Lt_bind', u'headSide_Rt_bind']\n # utils.break_connections(nodes=scales, attrs='s')", "def set_id(self, refobj, identifier):\n cmds.setAttr(\"%s.identifier\" %refobj, identifier)", "async def setWriters(self, eventID: str, writers: Iterable[str]) -> None:", "def setships(uid, secret, ships):\n try:\n SERVER.validate_player(uid, secret)\n SERVER.setships(uid, ships)\n except RoboBattleshipException as e:\n return JsonResponse.error(e)\n except:\n LOG.exception(\"Failed to set ships '%s' for player '%s'\", ships, uid)\n return JsonResponse.error(101)\n return JsonResponse.success()", "def map_and_save_gene_ids(hit_genes_location, all_detectable_genes_location=''):\n\n standardized_hits = [] # [primary_set]\n standardized_secondary_hits = [] # [secondary_set=None]\n\n if type(hit_genes_location) == str or isinstance(hit_genes_location, pathlib.PurePath):\n # log.info('codepath 1')\n standardized_hits = [cast_external_refs_to_internal_ids(hit_genes_location)]\n standardized_secondary_hits = [None]\n\n if type(hit_genes_location) == tuple:\n # log.info('codepath 2')\n standardized_hits = [cast_external_refs_to_internal_ids(hit_genes_location[0])]\n standardized_secondary_hits = [cast_external_refs_to_internal_ids(hit_genes_location[1])]\n\n if type(hit_genes_location) == list:\n # log.info('codepath 3')\n for sub_hit_genes_location in hit_genes_location:\n # log.info('codepath 3.0')\n if type(sub_hit_genes_location) == str or isinstance(sub_hit_genes_location, pathlib.PurePath):\n # log.info('codepath 3.1')\n standardized_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location)]\n standardized_secondary_hits += [None]\n if type(sub_hit_genes_location) == tuple:\n # log.info('codepath 3.2')\n standardized_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location[0])]\n standardized_secondary_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location[1])]\n\n log.debug('standardized primary hits:\\n\\t%s' % standardized_hits)\n log.debug('standardized secondary_hits:\\n\\t%s' % standardized_secondary_hits)\n\n dump_object(Dumps.analysis_set_bulbs_ids, (standardized_hits, standardized_secondary_hits))\n\n if all_detectable_genes_location:\n background_set = cast_external_refs_to_internal_ids(all_detectable_genes_location)\n # print(background_set)\n primary_set = [y for x in standardized_hits for y in x] # flattens the mapped ids list\n # print(primary_set)\n\n formatted_secondary_hits = [_l\n if _l is not None\n else []\n for _l in standardized_secondary_hits]\n\n sec_set = [y for x in formatted_secondary_hits for y in x]\n\n re_primary_set = set()\n for _id in primary_set:\n if type(_id) == str or type(_id) == int:\n re_primary_set.add(_id)\n else:\n re_primary_set.add(_id[0])\n\n primary_set = re_primary_set\n\n re_secondary_set = set()\n for _id in sec_set:\n if type(_id) == str or type(_id) == int:\n re_secondary_set.add(_id)\n else:\n re_secondary_set.add(_id[0])\n\n sec_set = 
re_primary_set\n\n if type(background_set[0]) == str or type(background_set[0]) == int: # unweighted\n background_set = list(set(background_set).union(primary_set).union(sec_set))\n\n else:\n bck_set = {_id[0] for _id in background_set}\n bck_set = list(bck_set)\n\n if not primary_set.issubset(bck_set):\n log.info('Nodes ids %s are missing in background set and are added with weight 0' %\n (primary_set - bck_set))\n background_set += [(_id, 0) for _id in (primary_set - bck_set)]\n\n if not sec_set.issubset(bck_set):\n log.info('Secondary set nodes ids %s are missing in background set and are added '\n 'with weight 0' % (sec_set - bck_set))\n background_set += [(_id, 0) for _id in (sec_set - bck_set)]\n\n else:\n background_set = []\n\n dump_object(Dumps.background_set_bulbs_ids, background_set)\n\n return standardized_hits, standardized_secondary_hits, background_set", "def set_chrom_dict():\n chrom_dict = {\n str(i):'chr' + str(i) for i in range(1, MAXCHROM)\n }\n chrom_dict.update({\n 'X':'chr23',\n 'Y':'chr24',\n 'XY':'chr25',\n 'M':'chr26',\n 'MT':'chr26',\n 'chrX':'chr23',\n 'chrY':'chr24',\n 'chrXY':'chr25',\n 'chrM':'chr26',\n 'chrMT':'chr26'\n })\n return chrom_dict, MAXCHROM", "def _set_moderator(self, groupId, moderator_pos, newModUID, key, newkey):\n if len(groupId) != self.id_length:\n return error.error.main.invalid_length(u'群組/房間ID', self.id_length)\n\n if newModUID is None and newkey is None:\n delete_mod = True\n else:\n if len(newModUID) != self.id_length:\n return error.error.main.invalid_length(u'管理員UID', self.id_length)\n elif moderator_pos > 3 or moderator_pos < 0:\n return error.error.main.invalid_thing(u'副管位置序號', moderator_pos)\n delete_mod = False\n\n mod_col_dict = {1: 'moderator1', 2: 'moderator2', 3: 'moderator3'}\n mod_sha_dict = {1: 'moderator1_sha', 2: 'moderator2_sha', 3: 'moderator3_sha'}\n\n cmd_check = u'SELECT * FROM group_ban WHERE (admin_sha = %(key)s OR {} = %(key)s) AND groupId = %(gid)s'.format(mod_sha_dict[moderator_pos])\n cmd_check_dict = {'key': hashlib.sha224(key).hexdigest(),\n 'gid': groupId}\n results = self.sql_cmd(cmd_check, cmd_check_dict)\n \n if results is not None:\n cmd = u'UPDATE group_ban SET {} = %(mod)s, {} = %(newkey)s WHERE groupId = %(id)s'.format(mod_col_dict[moderator_pos],\n mod_sha_dict[moderator_pos])\n cmd_dict = {'id': groupId, \n 'mod': None if delete_mod else newModUID, \n 'newkey': None if delete_mod else hashlib.sha224(newkey).hexdigest()}\n self.sql_cmd(cmd, cmd_dict)\n return True\n else:\n return error.error.main.incorrect_password_or_insufficient_permission()", "def dict_replace_idstr_recursive(d, cid, xid):\n assert 'params' in d\n # assert d['params'].has_key('id')\n\n # print \"dict_replace_idstr_recursive\", print_dict(d)\n\n if cid is None:\n return d\n \n # if cid is not None:\n # change param 'id' with loop marker and number\n d['params']['id'] = \"%s%s%s\" % (cid, loop_delim, xid)\n logger.debug(\"dict_replace_idstr_recursive newid = %s\", d['params']['id'])\n\n # change param 'inputs'\n if 'inputs' in d['params']:\n for ink, inv in list(d['params']['inputs'].items()):\n # print \" cid = %s, id = %s, ink = %s, inv = %s\" % (\n # cid, d['params']['id'], ink, inv.keys())\n if 'bus' in inv:\n # print \" bus old\", inv['bus']\n inv['bus'] = re.sub(\n r'%s' % (cid, ),\n r'%s' % (d['params']['id'], ),\n inv['bus'])\n # print \" bus new\", inv['bus']\n \n # change param '?'\n\n # change params 'outputs'\n if 'outputs' in d['params']:\n for outk, outv in list(d['params']['outputs'].items()):\n # print \" cid = %s, 
id = %s, outk = %s, outv = %s\" % (\n # cid, d['params']['id'], outk, outv.keys())\n # if outv.has_key('bus'):\n for outvk in [k_ for k_ in list(outv.keys()) if k_ in ['trigger', 'buscopy']]:\n # if outv.has_key('bus'):\n # print \" bus old\", outv['bus']\n outv[outvk] = re.sub(\n r'%s' % (cid, ),\n r'%s' % (d['params']['id'], ),\n outv[outvk])\n # print \" bus new\", outv['bus']\n \n if 'graph' in d['params'] and type(d['params']['graph']) is not str:\n tgraph = OrderedDict()\n for k, v in list(d['params']['graph'].items()):\n # v['params']['id'] = k\n\n # replace ids in dict val\n v = dict_replace_idstr_recursive(d = v, cid = k, xid = xid)\n # replace ids in dict key\n k_ = v['params']['id']\n # reassemble\n tgraph[k_] = v\n d['params']['graph'][k] = v\n # copy new dict to graph\n # print \"tgraph\", tgraph\n d['params']['graph'] = tgraph\n # print \"d['params']['graph']\", d['params']['graph']\n return d", "def seer_pick(g):\n seer_id = game_state['seer_id']\n investigated_id = random_pick(\n remove_id(all_ids(g), seer_id))\n return investigated_id", "def replace(self, string):\n # self.values is assigned in mix_iterator()\n for k, v in self.job.items():\n string = string.replace(k, v)\n return string", "def _setVals(self, qubit_id=0):\n self.qubit_id = qubit_id", "def parse_int_set(nputstr=\"\"):\n selection = set()\n invalid = set()\n # tokens are comma seperated values\n tokens = [x.strip() for x in nputstr.split(',')]\n for i in tokens:\n try:\n # typically tokens are plain old integers\n selection.add(int(i))\n except:\n # if not, then it might be a range\n try:\n token = [int(k.strip()) for k in i.split('-')]\n if len(token) > 1:\n token.sort()\n # we have items seperated by a dash\n # try to build a valid range\n first = token[0]\n last = token[len(token)-1]\n for x in range(first, last+1):\n selection.add(x)\n except:\n # not an int and not a range...\n invalid.add(i)\n # Report invalid tokens before returning valid selection\n # print \"Invalid set: \" + str(invalid)\n return selection", "def fit(self, int_spec, cr_spec=None, minos=0., refit=True, **kwargs):\n\n self._int_spec = lambda EGeV, **kwargs: int_spec(EGeV, **kwargs)\n\n if cr_spec is not None:\n self._cr_spec = lambda EGeV, **kwargs: cr_spec(EGeV, **kwargs)\n else:\n self._cr_spec = None\n\n fitarg = self.fill_fitarg(**kwargs)\n\n t1 = time.time()\n self.run_migrad(fitarg, **kwargs)\n\n try:\n self._m.hesse()\n logging.debug(\"Hesse matrix calculation finished\")\n except RuntimeError as e:\n logging.warning(\n \"*** Hesse matrix calculation failed: {0}\".format(e)\n )\n\n logging.debug(self._m.fval)\n self.__repeat_migrad(**kwargs)\n logging.debug(self._m.fval)\n\n fmin = self._m.fmin\n\n if not fmin.hesse_failed:\n try:\n self._corr = self._m.np_matrix(correlation=True)\n except:\n self._corr = -1\n\n logging.debug(self._m.values)\n\n if self._m.valid and minos:\n for k in self._par_names:\n if kwargs['fix'][k]:\n continue\n self._m.minos(k, minos)\n logging.debug(\"Minos finished\")\n\n else:\n self.__print_failed_fit()\n\n logging.info('fit took: {0}s'.format(time.time() - t1))\n for k in self._par_names:\n if kwargs['fix'][k]:\n err = np.nan\n else:\n err = self._m.errors[k]\n logging.info('best fit {0:s}: {1:.5e} +/- {2:.5e}'.format(k, self._m.values[k], err))", "def __init__(self, string):\n input_params = string.split(\":\")\n self.name = input_params[0]\n self.levels = int(input_params[1])\n self.bfact = int(input_params[2])\n self.res_probs = input_params[3].split(',')\n\n self.residues = []\n self.weights = 
[]\n for res_prob in self.res_probs:\n name, prob = res_prob.split(\"-\")\n self.residues.append(name)\n self.weights.append(float(prob))", "def override_paramset(self, override_str):\n\n paramset = ParamSet()\n if not override_str:\n return paramset\n\n override = eval(override_str, {}, {})\n if not override:\n return paramset\n\n for override_name in override:\n # The override can have a node_name/parm format which allows for point\n # instance overrides to override parms in a network.\n\n cached_override = self.override_cache.get(override_name, None)\n if cached_override is not None:\n # Hint to just skip\n if cached_override == -1:\n continue\n if isinstance(cached_override, PBRTParam):\n # textures which can't be overriden\n paramset.add(cached_override)\n continue\n pbrt_name, pbrt_type, tuple_names = cached_override\n if tuple_names:\n value = [override[x] for x in tuple_names]\n else:\n value = override[override_name]\n pbrt_param = PBRTParam(pbrt_type, pbrt_name, value)\n paramset.add(pbrt_param)\n continue\n\n override_match = self.override_pat.match(override_name)\n spectrum_type = override_match.group(\"spectrum\")\n parm_name = override_match.group(\"parm\")\n override_node = override_match.group(\"node\")\n if override_node is not None and override_node != self.name:\n self.override_cache[override_name] = -1\n continue\n\n # There can be two style of \"overrides\" one is a straight parm override\n # which is similar to what Houdini does. The other style of override is\n # for the spectrum type parms. Since spectrum parms can be of different\n # types and the Material Overrides only support \"rgb\" we are limited\n # in the types of spectrum overrides we can do. To work around this we'll\n # support a different style, override_parm:spectrum_type. If the parm name\n # ends in one of the \"rgb/color\" types then we'll handle it differently.\n # TODO add a comment as to what the value would look like\n\n # NOTE: The material SOP will use a parm style dictionary if there\n # parm name matches exactly\n # ie) if there is a color parm you will get\n # {'colorb':0.372511,'colorg':0.642467,'colorr':0.632117,}\n # But if the parm name doesn't match (which we are allowing\n # for you will get something like this -\n # {'colora':(0.632117,0.642467,0.372511),}\n\n # Once we have a parm name, we need to determine what \"style\" it is.\n # Whether its a hou.ParmTuple or hou.Parm style.\n tuple_names = tuple()\n parm_tuple = self.node.parmTuple(parm_name)\n if parm_tuple is None:\n # We couldn't find a tuple of that name, so let's try a parm\n parm = self.node.parm(parm_name)\n if parm is None:\n # Nope, not valid either, let's move along\n self.override_cache[override_name] = -1\n continue\n # if its a parm but not a parmtuple it must be a split.\n parm_tuple = parm.tuple()\n # we need to \"combine\" these and process them all at once and\n # then skip any other occurances. The skipping is handled by\n # the overall caching mechanism. 
self.override_cache\n tuple_names = tuple([x.name() for x in parm_tuple])\n\n # This is for wrangling parm names of texture nodes due to having a\n # signature parm.\n pbrt_parm_name = self.pbrt_parm_name(parm_tuple.name())\n\n if spectrum_type is None and tuple_names:\n # This is a \"traditional\" override, no spectrum or node name prefix\n value = [override[x] for x in tuple_names]\n pbrt_param = self._hou_parm_to_pbrt_param(\n parm_tuple, pbrt_parm_name, value\n )\n elif spectrum_type in (\"spectrum\", \"xyz\", \"blackbody\"):\n pbrt_param = PBRTParam(\n spectrum_type, pbrt_parm_name, override[override_name]\n )\n elif not tuple_names:\n pbrt_param = self._hou_parm_to_pbrt_param(\n parm_tuple, pbrt_parm_name, override[override_name]\n )\n else:\n raise ValueError(\"Unable to wrangle override name: %s\" % override_name)\n\n paramset.add(pbrt_param)\n\n # From here to the end of the loop is to allow for caching\n\n if pbrt_param.type == \"texture\":\n self.override_cache[override_name] = pbrt_param\n continue\n\n # we are making an assumption a split parm will never be a spectrum\n # or have a node prefix. The Material SOP doesn't allow for it as well.\n for name in tuple_names:\n # The -1 means \"continue\"\n self.override_cache[name] = -1\n # Sanity check\n if tuple_names and override_name not in tuple_names:\n raise ValueError(\n \"Override name: %s, not valid for a parmTuple\" % override_name\n )\n # override_name must match one of the tuple_names\n self.override_cache[override_name] = (\n pbrt_param.name,\n pbrt_param.param_type,\n tuple_names,\n )\n return paramset", "def setstring(self):\n self._str = 's '+' '.join([self.src, self.start, self.size,\n self.strand, self.srcSize, self.text])+'\\n'", "def __init__(self, name, stru):\n ParameterSet.__init__(self, name)\n self.stru = stru\n self.addParameterSet(UnitCellParSet(self))\n self.scatterers = []\n\n self._update = False\n\n cdict = {}\n for s in stru.scatterers():\n el = s.element_symbol()\n i = cdict.get(el, 0)\n sname = \"%s%i\"%(el,i)\n cdict[el] = i+1\n scatterer = ScattererParSet(sname, self, i)\n self.addParameterSet(scatterer)\n self.scatterers.append(scatterer)\n\n # Constrain the lattice\n from diffpy.srfit.structure.sgconstraints import _constrainSpaceGroup\n symbol = self.getSpaceGroup()\n _constrainSpaceGroup(self, symbol)\n\n return", "def get_camp_ids_names_containing_str(marketer_id, string):\n all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0])\n return [{\"id\": x.get(\"id\"), \"name\": x.get(\"name\")} for x in all_campaigns if string in x[\"name\"]]", "def decode_block_string(self, block_string):\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n if 's' not in options or len(options['s']) != 2:\n raise ValueError('Strides options should be a pair of integers.')\n\n self.input_filters = int(options['i'])\n self.output_filters = int(options['o'])\n self.dw_kernel_size = self._parse_ksize(options['k'])\n self.expand_kernel_size = self._parse_ksize(options['a'])\n self.project_kernel_size = self._parse_ksize(options['p'])\n self.num_repeat = int(options['r'])\n self.identity_skip = ('noskip' not in block_string)\n self.se_ratio = float(options['se']) if 'se' in options else None\n self.expand_ratio = int(options['e'])\n self.strides = [int(options['s'][0]), int(options['s'][1])]\n self.swish = 'sw' in block_string\n 
self.dilated = 'dilated' in block_string\n\n return self", "def set_read_group( in_bam_path, out_bam_path, id:str, pl:str, lb:str, sm:str, pu:str, threads=4 ):\n\n \"\"\"\n read_groups(set/dict) : set or dictionary which contains read groups. The dictionary should have the format { read_group_id (str)\n { 'ID': ID, 'LB':library,\n 'PL':platform,\n 'SM':sampleLib,\n 'PU':readGroup }\n \"\"\"\n\n read_groups = {id:{ 'ID': id, 'LB':lb,\n 'PL':pl,\n 'SM':sm,\n 'PU':pu }}\n\n with pysam.AlignmentFile(in_bam_path, threads = threads) as input_bam:\n\n input_header = input_bam.header.as_dict()\n\n # Write provenance information to BAM header\n write_program_tag(\n input_header,\n program_name='bamReadGroupFormat',\n command_line=\" \".join(\n sys.argv),\n version=singlecellmultiomics.__version__,\n description=f'SingleCellMultiOmics read group formatting, executed at {datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")}')\n\n with sorted_bam_file(out_bam_path, header=input_header, read_groups=read_groups, input_is_sorted=True) as out:\n print('Started writing')\n for read in input_bam:\n rg_id = id\n read.set_tag('RG',rg_id)\n out.write(read)" ]
[ "0.50190747", "0.50190747", "0.4670787", "0.45393595", "0.45045197", "0.44899312", "0.44867754", "0.4224754", "0.42206508", "0.4202021", "0.4202021", "0.41952497", "0.41925597", "0.41226864", "0.41184327", "0.40634874", "0.40553337", "0.40470064", "0.40200716", "0.40155357", "0.4013787", "0.3996931", "0.39936945", "0.3992161", "0.39744848", "0.39669463", "0.39614373", "0.3939572", "0.39366975", "0.3930893", "0.3928163", "0.3924274", "0.39130297", "0.39106008", "0.3887866", "0.3879366", "0.3851222", "0.38448095", "0.38376158", "0.3833368", "0.38286734", "0.38177988", "0.3817791", "0.38142192", "0.38108036", "0.38077018", "0.38050616", "0.3801828", "0.3796488", "0.37946516", "0.37889126", "0.37863111", "0.37807742", "0.37784702", "0.37663716", "0.37650818", "0.37625587", "0.37606323", "0.3753843", "0.37402874", "0.37389824", "0.37308592", "0.37290198", "0.37251127", "0.37238285", "0.3718085", "0.3711977", "0.37115014", "0.370559", "0.3703484", "0.37030134", "0.36998397", "0.36965615", "0.36963117", "0.368966", "0.36847472", "0.36836165", "0.36834806", "0.36781093", "0.36777207", "0.36701035", "0.36696932", "0.36678833", "0.36672187", "0.36648417", "0.3664737", "0.3661428", "0.36613494", "0.3660403", "0.36571", "0.36559418", "0.36550385", "0.3650425", "0.36503965", "0.36503673", "0.36499798", "0.3646367", "0.36454773", "0.3642404", "0.36419192" ]
0.6751865
0
Dump utils image template.py as a Dict. The key is like "simnet/lndbtc"
def _dump_template(self, utils_image) -> Dict[str, str]: cmd = f"docker run -i --rm --entrypoint python {utils_image}" p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT) out, _ = p.communicate(input=SCRIPT.encode()) output = out.decode() if p.returncode != 0: self._logger.error("Failed to dump %s template.py\n%s", utils_image, output) raise RuntimeError("Failed to dump %s template.py" % utils_image) lines = output.splitlines() result = {} for line in lines: key, value = line.split() result[key] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _driver_template_data(self):\n return {\n 'driver_module': self.driver_modulename(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'driver_path': self.metadata.driver_path,\n 'release_notes': self.metadata.notes,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def create_dump(self) -> Dict[str, str]:\n return self.http.post(self.config.paths.dumps)", "def get_template_data(self) -> dict:\n template_data = self._get_template_data()\n\n @dataclass\n class FileEntry:\n \"\"\"Provides an entry into manifest object.\"\"\"\n\n name: str\n size: str\n md5: Optional[str]\n\n template_data[\"resource_files\"] = [\n FileEntry(entry.name, convert_size(entry.size), entry.md5)\n for entry in self.resource.get_manifest().entries.values()\n if not entry.name.startswith(\"statistics\")\n and entry.name != \"index.html\"]\n template_data[\"resource_files\"].append(\n FileEntry(\"statistics/\", \"\", \"\"))\n return template_data", "def generate_cfg():\n \n if not os.path.exists(cfg_path):\n os.mkdir(cfg_path)\n \n for img_path in get_template_paths():\n extractor = BlockExtractor(img_path)\n extractor.get_cfg()\n for block in extractor.get_blocks():\n img = BlockParser(img_path, block).block_image()\n #cv.imshow(\"Block\", img)\n #cv.waitKey() & 0xFF", "def _get_image_type_templates():\n yaml_file = os.path.join(ROOT_DIR, 'docker', 'image_types.yaml')\n all_templates = yaml_utils.read(yaml_file)\n return all_templates", "def genConvOnboardingInfoJsonFile( sztpOnboardingInfo, onboardingFileJson ):\n template = {\n \"boot-image\": {\n \"os-name\": str,\n \"os-version\": str,\n \"download-uri\": list, # of uri strings\n \"image-verification\": [ {\n \"hash-algorithm\": str,\n \"hash-value\": str } ],\n },\n \"configuration-handling\": str,\n \"pre-configuration-script\": str,\n \"configuration\": str,\n \"post-configuration-script\": str\n }\n\n def verifyBootImage( template, sztpBootImage ):\n \"\"\"Verify boot image is correct\"\"\"\n def verifyImageVerification( imageVerification ):\n \"\"\"Verify instance of image-verification is correct\"\"\"\n if \"hash-algorithm\" in imageVerification:\n assert imageVerification[ \"hash-algorithm\" ] == \\\n \"ietf-sztp-conveyed-info:sha-256\",\\\n \"Unsupported hash-algorithm\"\n assert \"hash-value\" in imageVerification, \\\n \"Expected hash-value not present\"\n hashValue = imageVerification[ \"hash-value\" ]\n # Verify hashValue appears to be a yang:hex-string\n assert len( hashValue ) == 32 * 3 - 1 and \\\n all( c == ':' or c in string.hexdigits for c in hashValue ), \\\n \"hash-value invalid\"\n\n def verifyImageVerificationList( template, sztpImageVerification ):\n \"\"\"Verify image-verification list is correct\"\"\"\n assert isinstance( sztpImageVerification, list ), \\\n \"Expected list\"\n for imageVer in sztpImageVerification:\n assert verifyDictTypes( template, imageVer ), \"Unexpected value types\"\n assert set( imageVer.keys() ).issubset( set( template.keys() ) ), \\\n \"Unexpected keys in dict\"\n verifyImageVerification( imageVer )\n\n mandatory = [ \"download-uri\" ]\n assert isinstance( sztpBootImage, dict ), \"Expected dict\"\n assert set( sztpBootImage.keys() ).issubset( template.keys() ), \\\n \"Unexpected keys in dict\"\n assert verifyDictTypes( template, sztpBootImage ), \\\n \"Unexpected value types\"\n assert set( mandatory ).issubset( 
sztpBootImage ), \\\n \"Mandatory keys not present\"\n if \"image-verification\" in sztpBootImage:\n verifyImageVerificationList( template[ \"image-verification\" ][ 0 ],\n sztpBootImage[ \"image-verification\" ] )\n\n # verify onboarding-info dict is correctly constructed\n assert isinstance( sztpOnboardingInfo, dict ), \"Expected dict\"\n assert set( sztpOnboardingInfo.keys() ).issubset( template.keys() ), \\\n \"Unexpected keys in dict\"\n assert verifyDictTypes( template, sztpOnboardingInfo ), \\\n \"Unexpected values types\"\n assert sztpOnboardingInfo[ \"configuration-handling\" ] == \"replace\", \\\n \"Unsupported configuration-handling value\"\n if \"boot-image\" in sztpOnboardingInfo:\n verifyBootImage( template[ \"boot-image\" ],\n sztpOnboardingInfo[ \"boot-image\" ] )\n\n # construct outer dictionary and convert to json\n ietfOnboardingInfo = { \"ietf-sztp-conveyed-info:onboarding-information\":\n sztpOnboardingInfo }\n jsonIetfOnboardingInfo = json.dumps( ietfOnboardingInfo, indent=4 )\n\n # save to file\n with open( onboardingFileJson, \"w\" ) as tmpFile:\n tmpFile.write( jsonIetfOnboardingInfo )", "def create_json(self):\n data = {\"image_id\": self.ids, \"img_path\": self.img_paths, \"bg\": self.bgs}\n if hasattr(self, \"bbox\"):\n data[\"bbox\"] = self.bbox\n if hasattr(self, \"masks\"):\n data[\"masks\"] = self.masks\n with open(f\"{self.save_path}{self.name}/json/images_info.json\", \"w\") as f:\n json.dump(data, f)", "def gen_obj(image_name):\n\n recon_options['dataroot'] = f'{dir_path}/static'\n recon_options['out_path'] = recon_options['dataroot']\n recon_options['results_path'] = recon_options['dataroot']\n recon_options['ckpt_path'] = f'{dir_path}/checkpoints/pifuhd.pt'\n recon_options['load_netMR_checkpoint_path'] = recon_options['ckpt_path']\n recon_options['checkpoints_path'] = f'{dir_path}/checkpoints'\n recon_options['loadSize'] = 1024\n recon_options['resolution'] = 512\n\n path = reconWrapper(DotDict(recon_options), True, image_name)\n\n return path", "def _test_template_data(self):\n chars=string.ascii_uppercase + string.digits\n id = ''.join(random.choice(chars) for x in range(6))\n\n return {\n 'test_module': self.test_modulename(),\n 'driver_module': self.driver_modulename(),\n 'driver_dir': self.driver_dir(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def _get_template_data(snapshot_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n snapshot_id = snapshot_data['id']\n Linux.logger.debug(f'Compiling template data for Snapshot #{snapshot_id}')\n data: Dict[str, Any] = {key: None for key in Linux.template_keys}\n\n data['host_sudo_passwd'] = settings.NETWORK_PASSWORD\n data['snapshot_identifier'] = f'{snapshot_data[\"vm\"][\"id\"]}_{snapshot_data[\"id\"]}'\n data['vm_identifier'] = f'{snapshot_data[\"vm\"][\"project\"][\"id\"]}_{snapshot_data[\"vm\"][\"id\"]}'\n\n # Get the ip address of the host\n host_ip = None\n for interface in snapshot_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if IPAddress(str(interface['ip_address'])).version == 6:\n host_ip = interface['ip_address']\n break\n if host_ip is None:\n error = f'Host ip address not found for the server # {snapshot_data[\"vm\"][\"server_id\"]}'\n Linux.logger.error(error)\n 
snapshot_data['errors'].append(error)\n return None\n data['host_ip'] = host_ip\n return data", "def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n vm_id = vm_data['id']\n Windows.logger.debug(f'Compiling template data for VM #{vm_id}')\n data: Dict[str, Any] = {key: None for key in Windows.template_keys}\n\n data['vm_identifier'] = f'{vm_data[\"project\"][\"id\"]}_{vm_id}'\n data['image_answer_file_name'] = vm_data['image']['answer_file_name']\n\n data['image_filename'] = vm_data['image']['filename']\n # check if file exists at /mnt/images/HyperV/VHDXs/\n path = '/mnt/images/HyperV/VHDXs/'\n child_span = opentracing.tracer.start_span('vm_image_file_download', child_of=span)\n if not Windows.check_image(data['image_filename'], path):\n # download the file\n downloaded, errors = Windows.download_image(data['image_filename'], path)\n if not downloaded:\n for error in errors:\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n child_span.finish()\n\n # RAM is needed in MB for the builder but we take it in in GB (1024, not 1000)\n data['ram'] = vm_data['ram'] * 1024\n data['cpu'] = vm_data['cpu']\n data['dns'] = vm_data['dns']\n\n # Generate encrypted passwords\n data['admin_password'] = Windows._password_generator(size=12)\n # Also save the password back to the VM data dict\n vm_data['admin_password'] = data['admin_password']\n\n # Check for the primary storage\n if not any(storage['primary'] for storage in vm_data['storages']):\n error = 'No primary storage drive found. Expected one primary storage drive'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n data['storages'] = vm_data['storages']\n data['storage_type'] = vm_data['storage_type']\n\n # Get the Networking details\n data['vlans'] = []\n data['ip_addresses'] = []\n data['default_ips'] = []\n data['default_gateway'] = ''\n data['default_netmask_int'] = ''\n data['default_vlan'] = ''\n\n # The private IPs for the VM will be the one we need to pass to the template\n vm_data['ip_addresses'].reverse()\n ip_addresses = []\n subnets = []\n for ip in vm_data['ip_addresses']:\n if IPAddress(ip['address']).is_private():\n ip_addresses.append(ip)\n subnets.append({\n 'address_range': ip['subnet']['address_range'],\n 'vlan': ip['subnet']['vlan'],\n 'id': ip['subnet']['id'],\n })\n # Removing duplicates\n subnets = [dict(tuple_item) for tuple_item in {tuple(subnet.items()) for subnet in subnets}]\n # sorting nics (each subnet is one nic)\n for subnet in subnets:\n non_default_ips = []\n gateway, netmask_int = subnet['address_range'].split('/')\n vlan = str(subnet['vlan'])\n data['vlans'].append(vlan)\n\n for ip_address in ip_addresses:\n address = ip_address['address']\n if ip_address['subnet']['id'] == subnet['id']:\n # Pick the default ips if any\n if vm_data['gateway_subnet'] is not None:\n if subnet['id'] == vm_data['gateway_subnet']['id']:\n data['default_ips'].append(address)\n data['default_gateway'] = gateway\n data['default_netmask_int'] = netmask_int\n data['default_vlan'] = vlan\n continue\n # else store the non gateway subnet ips\n non_default_ips.append(address)\n\n if len(non_default_ips) > 0:\n data['ip_addresses'].append({\n 'ips': non_default_ips,\n 'gateway': gateway,\n 'netmask_int': netmask_int,\n 'vlan': vlan,\n })\n\n # Add locale data to the VM\n data['language'] = 'en_IE'\n data['timezone'] = 'GMT Standard Time'\n\n # Get the host name of the server\n host_name = None\n for interface in 
vm_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if IPAddress(str(interface['ip_address'])).version == 6:\n host_name = interface['hostname']\n break\n if host_name is None:\n error = f'Host name is not found for the server # {vm_data[\"server_id\"]}'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n # Add the host information to the data\n data['host_name'] = host_name\n data['network_drive_url'] = settings.NETWORK_DRIVE_URL\n data['vms_path'] = settings.HYPERV_VMS_PATH\n\n return data", "def img_map(ts):\n image_map = \"\"\n texdata = bpy.data.textures[ts.texture]\n if ts.mapping == \"FLAT\":\n image_map = \"map_type 0 \"\n elif ts.mapping == \"SPHERE\":\n image_map = \"map_type 1 \"\n elif ts.mapping == \"TUBE\":\n image_map = \"map_type 2 \"\n\n # map_type 3 and 4 in development (?) (ENV in pov 3.8)\n # for POV-Ray, currently they just seem to default back to Flat (type 0)\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 3 \"\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 4 \"\n if ts.use_interpolation: # Available if image sampling class reactivated?\n image_map += \" interpolate 2 \"\n if texdata.extension == \"CLIP\":\n image_map += \" once \"\n # image_map += \"}\"\n # if ts.mapping=='CUBE':\n # image_map+= \"warp { cubic } rotate <-90,0,180>\"\n # no direct cube type mapping. Though this should work in POV 3.7\n # it doesn't give that good results(best suited to environment maps?)\n # if image_map == \"\":\n # print(\" No texture image found \")\n return image_map", "def __make_processing(self, img_name, abspath_dir_img, id_foot):\n data = {}\n data['data'] = ImageInfo.get_date(abspath_dir_img)\n data['total_part'] = TOTAL_PART\n data['nuvens'] = ImageInfo.get_cloud(abspath_dir_img)\n self.__make_tms(abspath_dir_img)\n data['geom'] = self.__make_footprint(abspath_dir_img, shp_out=id_foot)\n abspath_rgb, img_name_rgb = ImageInfo.get_image_rgb(\n abspath_dir_img, img_name\n )\n data['tms'] = ImageInfo.get_xml_tms(img_name_rgb)\n data['image'] = img_name_rgb\n data['quicklook'] = self.__make_png(abspath_rgb)\n data['path'] = ImageInfo.get_path(img_name)\n return data", "def detailed_json(self, absolutize_url):\n template = {}\n template.update({\n \"id\": self.image_id,\n \"links\": self.links_json(absolutize_url),\n \"name\": self.name,\n \"minRam\": self.minRam,\n \"minDisk\": self.minDisk,\n \"OS-EXT-IMG-SIZE:size\": self.image_size,\n \"com.rackspace__1__ui_default_show\": self.is_default,\n \"created\": \"1972-01-01_15-59-11\",\n \"updated\": \"1972-01-01_15-59-11\",\n \"status\": \"ACTIVE\",\n \"progress\": 100,\n \"metadata\": self.metadata_json()\n })\n return template", "def detailed_json(self, absolutize_url):\n template = {}\n template.update({\n \"id\": self.image_id,\n \"links\": self.links_json(absolutize_url),\n \"name\": self.name,\n \"minRam\": self.minRam,\n \"minDisk\": self.minDisk,\n \"OS-EXT-IMG-SIZE:size\": self.image_size,\n \"com.rackspace__1__ui_default_show\": self.is_default,\n \"created\": \"1972-01-01_15-59-11\",\n \"updated\": \"1972-01-01_15-59-11\",\n \"progress\": 100,\n \"status\": \"ACTIVE\",\n \"metadata\": self.metadata_json()\n })\n return template", "def export_project_dump(self, key):", "def dict(self):\n d = {}\n d['template_id'] = self.id\n d['name'] = self.name\n d['cpu'] = self.cpu\n d['memory'] = self.memory\n d['points'] = self.points\n d['description'] = self.description\n d['ec2name'] = self.ec2name\n # state is not put in 
dictionary\n return d", "def serialize(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'body': self.body,\n 'images': {\n 'img_path_xs': self.img_path_xs,\n 'img_path_sm': self.img_path_sm,\n 'img_path_md': self.img_path_md,\n 'img_path_lg': self.img_path_lg\n },\n 'is_active': self.is_active,\n }", "def create_base_image(self, builder, template, parameters):", "def return_template_output(base_dir,filename,data_dict):\n templateLoader = jinja2.FileSystemLoader( searchpath=base_dir)\n templateEnv = jinja2.Environment( loader=templateLoader )\n template = templateEnv.get_template(filename)\n output = template.render(data_dict)\n return output", "def raw_image(self):\n\t\treturn FstabEntry([f\"{self.mount_point}_image\", \"emmc\", self.device])", "def dump(self) -> dict[Any, str]:\r\n ...", "def make_image(self):\n\n if self.type == 'passthrough':\n return\n render_template(\n os.path.dirname(self.main_module),\n os.path.basename(self.main_module_path),\n self.language,\n self.requirements,\n self.whitelist,\n self.type,\n into=self.code_dir)\n self.build()", "def _write_packet_dict(ctx, package_dict):\n p4gf_util.write_dict_to_file(package_dict, _packet_filename(ctx.config.repo_name))", "def outputs(self):\n return {\"path_to_dtb_json_file\": File_IO(\n self.node.outputs[0])}", "def get_heat_json_from_topology_config(config, project_name='admin'):\n\n template = dict()\n template[\"heat_template_version\"] = \"2013-05-23\"\n template[\"resources\"] = dict()\n\n for network in config[\"networks\"]:\n nr = dict()\n nr[\"type\"] = \"OS::Neutron::Net\"\n\n nrp = dict()\n nrp[\"shared\"] = False\n nrp[\"name\"] = network[\"name\"]\n nrp[\"admin_state_up\"] = True\n\n nr[\"properties\"] = nrp\n\n nrs = dict()\n nrs[\"type\"] = \"OS::Neutron::Subnet\"\n #\n p = dict()\n p[\"cidr\"] = \"1.1.1.0/24\"\n p[\"enable_dhcp\"] = False\n p[\"gateway_ip\"] = \"\"\n p[\"name\"] = network[\"name\"] + \"_subnet\"\n if network[\"name\"] == \"virbr0\":\n p[\"network_id\"] = configuration.openstack_mgmt_network\n elif network[\"name\"] == configuration.openstack_external_network:\n p[\"network_id\"] = configuration.openstack_external_network\n else:\n p[\"network_id\"] = {\"get_resource\": network[\"name\"]}\n\n nrs[\"properties\"] = p\n\n template[\"resources\"][network[\"name\"]] = nr\n template[\"resources\"][network[\"name\"] + \"_subnet\"] = nrs\n\n # cache the image_details here to avoid multiple REST calls for details about an image type\n # as many topologies have lots of the same types of images around\n image_details_dict = dict()\n\n for device in config[\"devices\"]:\n\n if device[\"imageId\"] in image_details_dict:\n image_details = image_details_dict[device[\"imageId\"]]\n else:\n image_details = imageUtils.get_image_detail(device[\"imageId\"])\n image_details_dict[device[\"imageId\"]] = image_details\n\n image_name = image_details[\"name\"]\n\n image_disk_size = 20\n\n # set the size in GB, rounding up to the nearest int\n if 'size' in image_details:\n current_size = float(image_details['size'])\n image_disk_size = int(math.ceil(current_size / 1000000000))\n\n # if the glance image asks for a minimum disk size, let's see if it's larger that what we have\n if \"min_disk\" in image_details and image_details['min_disk'] > image_disk_size:\n image_disk_size = image_details[\"min_disk\"]\n\n # if the user has specified a desired disk size, grab it here so we get the correct flavor\n if type(image_disk_size) is int and device[\"resizeImage\"] > image_disk_size:\n image_disk_size = 
device[\"resizeImage\"]\n\n # determine openstack flavor here\n device_ram = int(device[\"ram\"])\n device_cpu = int(device[\"cpu\"])\n\n flavor_detail = openstackUtils.get_minimum_flavor_for_specs(configuration.openstack_project,\n device_cpu,\n device_ram,\n image_disk_size\n )\n\n flavor = flavor_detail[\"name\"]\n\n dr = dict()\n dr[\"type\"] = \"OS::Nova::Server\"\n dr[\"properties\"] = dict()\n dr[\"properties\"][\"flavor\"] = flavor\n dr[\"properties\"][\"networks\"] = []\n index = 0\n for p in device[\"interfaces\"]:\n port = dict()\n port[\"port\"] = dict()\n port[\"port\"][\"get_resource\"] = device[\"name\"] + \"_port\" + str(index)\n index += 1\n dr[\"properties\"][\"networks\"].append(port)\n\n dr[\"properties\"][\"image\"] = image_name\n dr[\"properties\"][\"name\"] = device[\"name\"]\n\n if device[\"configDriveSupport\"]:\n dr[\"properties\"][\"config_drive\"] = True\n dr[\"properties\"][\"user_data_format\"] = \"RAW\"\n metadata = dict()\n metadata[\"hostname\"] = device[\"name\"]\n metadata[\"console\"] = \"vidconsole\"\n dr[\"properties\"][\"metadata\"] = metadata\n\n # let's check all the configDriveParams and look for a junos config\n # FIXME - this may need tweaked if we need to include config drive cloud-init support for other platforms\n # right now we just need to ignore /boot/loader.conf\n for cfp in device[\"configDriveParams\"]:\n\n if \"destination\" in cfp and cfp[\"destination\"] == \"/boot/loader.conf\":\n logger.debug(\"Creating loader.conf config-drive entry\")\n template_name = cfp[\"template\"]\n loader_string = osUtils.compile_config_drive_params_template(template_name,\n device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"])\n\n logger.debug('----------')\n logger.debug(loader_string)\n logger.debug('----------')\n for l in loader_string.split('\\n'):\n if '=' in l:\n left, right = l.split('=')\n if left not in metadata and left != '':\n metadata[left] = right.replace('\"', '')\n\n if \"destination\" in cfp and cfp[\"destination\"] == \"/juniper.conf\":\n logger.debug(\"Creating juniper.conf config-drive entry\")\n template_name = cfp[\"template\"]\n personality_string = osUtils.compile_config_drive_params_template(template_name,\n device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"])\n\n dr[\"properties\"][\"personality\"] = dict()\n dr[\"properties\"][\"personality\"] = {\"/config/juniper.conf\": personality_string}\n else:\n logger.debug('No juniper.conf found here ')\n\n if device['cloudInitSupport']:\n logger.debug('creating cloud-init script')\n dr[\"properties\"][\"config_drive\"] = True\n dr[\"properties\"][\"user_data_format\"] = \"RAW\"\n metadata = dict()\n metadata[\"hostname\"] = device[\"name\"]\n dr[\"properties\"][\"metadata\"] = metadata\n # grab the prefix len from the management subnet which is in the form 192.168.122.0/24\n if '/' in configuration.management_subnet:\n management_prefix_len = configuration.management_subnet.split('/')[1]\n else:\n management_prefix_len = '24'\n\n management_ip = device['ip'] + '/' + management_prefix_len\n\n device_config = osUtils.get_cloud_init_config(device['name'],\n device['label'],\n management_ip,\n device['managementInterface'],\n device['password'])\n\n script_string = \"\"\n if \"configScriptId\" in device and device[\"configScriptId\"] != 0:\n logger.debug(\"Passing script data!\")\n try:\n script = Script.objects.get(pk=int(device[\"configScriptId\"]))\n script_string 
= script.script\n device_config[\"script_param\"] = device.get(\"configScriptParam\", '')\n logger.debug(script_string)\n except ObjectDoesNotExist:\n logger.info('config script was specified but was not found!')\n\n user_data_string = osUtils.render_cloud_init_user_data(device_config, script_string)\n dr[\"properties\"][\"user_data\"] = user_data_string\n\n template[\"resources\"][device[\"name\"]] = dr\n\n for device in config[\"devices\"]:\n index = 0\n for port in device[\"interfaces\"]:\n pr = dict()\n pr[\"type\"] = \"OS::Neutron::Port\"\n p = dict()\n\n if port[\"bridge\"] == \"virbr0\":\n p[\"network_id\"] = configuration.openstack_mgmt_network\n\n # specify our desired IP address on the management interface\n p['fixed_ips'] = list()\n fip = dict()\n fip['ip_address'] = device['ip']\n p['fixed_ips'].append(fip)\n\n elif port[\"bridge\"] == configuration.openstack_external_network:\n p[\"network_id\"] = configuration.openstack_external_network\n else:\n p[\"network_id\"] = {\"get_resource\": port[\"bridge\"]}\n # disable port security on all other ports (in case this isn't set globally)\n p['port_security_enabled'] = False\n\n pr[\"properties\"] = p\n template[\"resources\"][device[\"name\"] + \"_port\" + str(index)] = pr\n index += 1\n\n return json.dumps(template)", "def generate_utils(self):\n # type: (Generator) -> str\n return render_to_string(\n self.backend,\n \"utils.py\",\n {\n \"security_defs\": self.security_defs\n },\n )", "def get_kml_dict(self, tx, ty_tms, tz, image_format, draworder = 0):\n d = {}\n\n d[\"south\"], d[\"west\"], d[\"north\"], d[\"east\"] = self.tileswne(tx, ty_tms, tz)\n\n image_filename = get_tile_filename(tx, ty_tms, tz, format_extension[image_format],False)\n d[\"image_filename\"] = image_filename\n d[\"image_filename\"] = d[\"image_filename\"].replace(\"\\\\\",\"/\")\n\n if self.options.url is None:\n d[\"image_url\"] = \"../../%s\" % image_filename\n else:\n d[\"image_url\"] = \"%s%s\" % (self.options.url, image_filename)\n d[\"image_url\"] = d[\"image_url\"].replace(\"\\\\\",\"/\")\n\n url = self.options.url\n if url is None:\n # Top level KML is linked from `doc.kml' and it needs different path.\n if tz == self.tminz:\n url = \"\"\n else:\n url = \"../../\"\n\n if self.options.kmz:\n extension = \"kmz\"\n else:\n extension = \"kml\"\n\n d[\"link_url\"] = \"%s%s\" % (url, get_tile_filename(tx, ty_tms, tz, extension,False))\n d[\"link_url\"] = d[\"link_url\"].replace(\"\\\\\",\"/\")\n\n d[\"minlodpixels\"] = int(self.tilesize / 2)\n d[\"maxlodpixels\"] = -1 # int(self.tilesize * 8)\n\n if tx == 0:\n d[\"draw_order\"] = draworder + 2 * tz + 1\n else:\n d[\"draw_order\"] = draworder + 2 * tz\n\n return d", "def get_mapdata():\n return render_template(\"l_heatmap.html\")", "def get_config_template() -> dict:\n return {\n VENE_PAYMENTS_BAMBORA_API_URL: (str, \"https://payform.bambora.com/pbwapi\"),\n VENE_PAYMENTS_BAMBORA_API_KEY: str,\n VENE_PAYMENTS_BAMBORA_API_SECRET: str,\n VENE_PAYMENTS_BAMBORA_PAYMENT_METHODS: list,\n }", "def make_image_dict(self):\n sprite_sheet = setup.GFX['treasurechest']\n image_dict = {'closed': self.get_image(0, 0, 32, 32, sprite_sheet),\n 'opened': self.get_image(32, 0, 32, 32, sprite_sheet)}\n\n return image_dict", "def calc_template(template_def, config):\n template = Template(**template_def)\n #print \"template_def:\", template_def, \"config:\", config\n try:\n retvals = process_template(template, config, target=(None, None))\n except Exception:\n print(\"==== template ====\"); pprint(template_def)\n print(\"==== 
config ====\"); pprint(config)\n #traceback.print_exc()\n raise\n output = {}\n for rkey, rv in retvals.items():\n module_id, terminal_id = rkey\n module_key = str(module_id)\n output.setdefault(module_key, {})\n output[module_key][terminal_id] = rv.todict()\n return output", "def fingerprint():\n files = (glob.glob(base_dir + '**/*.html') +\n glob.glob(base_dir + '*.html') +\n glob.glob(base_dir + 'core.js'))\n\n md5s = OrderedDict()\n\n for fil in sorted(files):\n name = fil[len(base_dir):]\n with open(fil) as fp:\n md5 = hashlib.md5(fp.read().encode('utf-8')).hexdigest()\n md5s[name] = md5\n\n template = \"\"\"\\\"\\\"\\\"DO NOT MODIFY. Auto-generated by script/fingerprint_frontend.\\\"\\\"\\\"\n\nFINGERPRINTS = {}\n\"\"\"\n\n result = template.format(json.dumps(md5s, indent=4))\n\n with open(fingerprint_file, 'w') as fp:\n fp.write(result)", "def get_pic() -> str:\n with open(os.path.dirname(os.path.abspath(__file__))+'\\\\data.json', 'r') as test:\n test = json.load(test)\n pic = test['button_pic']\n return pic", "def get_image_name_for_hook(module):\n os.makedirs(INSTANCE_FOLDER, exist_ok=True)\n base_name = str(module).split('(')[0]\n index = 0\n image_name = '.' # '.' is surely exist, to make first loop condition True\n while os.path.exists(image_name):\n index += 1\n image_name = os.path.join(\n INSTANCE_FOLDER, '%s_%d.png' % (base_name, index))\n return image_name", "def generate_image(self):\n pass", "def dump(replay_dir: \"os.PathLike[str]\", template_name: str, context: dict):\n make_sure_path_exists(replay_dir)\n\n if not isinstance(template_name, str):\n raise TypeError('Template name is required to be of type str')\n\n if not isinstance(context, dict):\n raise TypeError('Context is required to be of type dict')\n\n if 'cookiecutter' not in context:\n raise ValueError('Context is required to contain a cookiecutter key')\n\n replay_file = get_file_name(replay_dir, template_name)\n\n with open(replay_file, 'w') as outfile:\n json.dump(context, outfile, indent=2)", "def local_metadata(paths):\n\n # Update template directory\n image_graph = network.load_graph(paths.image_network)\n\n template_paths = {}\n\n def template_selection(path_listing):\n for node in path_listing:\n if os.path.exists(paths.resource_pack + '\\\\' + os.path.join(*(node.split(os.path.sep)[1:]))):\n image_data = dict(image_graph.nodes(data=True))[node]\n print(image_data)\n template_paths[os.path.split(image_data['group_name'])[1]] = node\n return\n\n for bunch in connected_component_subgraphs(image_graph):\n sorted_bunch = network.connectivity_sort(bunch.nodes(), bunch)\n\n if len(sorted_bunch) == 1:\n continue\n\n template_selection(sorted_bunch)\n\n print(str(len(list(template_paths.values()))) + ' templates identified.')\n\n with open(paths.binding_identifiers, 'w') as json_binding_ids:\n json.dump(template_paths, json_binding_ids, sort_keys=True, indent=2)\n\n bindings.build(paths, template_paths.values())", "def img_layer_dict_to_str(layer: dict) -> str:\n\n layer_str = [\n \"const \" + layer[\"name\"],\n ' = L.tileLayer(\"' + layer[\"directory\"] + '\"',\n \", { \",\n 'attribution:\"' + LAYER_ATTRIBUTION + '\", ',\n \"minZoom: \" + str(layer[\"min_zoom\"]) + \", \",\n \"maxZoom: \" + str(layer[\"max_zoom\"]) + \", \",\n \"maxNativeZoom: \" + str(layer[\"max_native_zoom\"]) + \" \",\n \"});\",\n ]\n\n return \"\".join(layer_str)", "def write_template(config: Config) -> Config:\n data = TPLUGIN\n\n if config.template == \"object\":\n data = OPLUGIN\n\n with open(config.path, \"w\") as f:\n 
content_decoded = base64.b64decode(data)\n content_decompressed = gzip.decompress(content_decoded)\n\n content = content_decompressed.decode(\"ascii\")\n f.write(content.format(**config.format_data))\n\n return config", "def get_string(self):\n return IMAGE_TEMPLATE.substitute(\n caption = self._caption,\n tag = str(self._tag),\n scale = str(self._scale),\n path = str(self._image_path))", "def get_dockerfile_content(self):\n\n dockerfile_content: List[str] = [\n 'FROM nginx:latest',\n '# Update and install required packages',\n 'RUN apt-get update',\n 'RUN apt-get install vim -y',\n '',\n 'COPY ./.docker/config/nginx.conf /etc/nginx/conf.d/nginx.conf',\n '',\n 'ENTRYPOINT [\"nginx\"]',\n 'CMD [\"-g\",\"daemon off;\"]'\n ]\n return dockerfile_content", "def get_layer_info_template(file, print_first_element = True):\n \n try:\n layer_info_template = json.load(open(file))\n if(print_first_element==True):\n print(\"/n----This is the layer info template ----\")\n print(layer_info_template)\n return layer_info_template\n except:\n print(\"Unexpected error:\", sys.exc_info()[0]) \n return None", "def render(self, data_dict, template=None, **kw):\n LOG.debug(\"rendering output as yaml via %s\" % self.__module__)\n return yaml.dump(data_dict, **kw)", "def instructions():\n txt_naming = ConfigReader.texture_naming_dict()\n\n text = \"<b>Texture naming rules:</b><br>(put an underscore _ at the end of file name; \" \\\n \"you can enumerate textures using two digits after texture type without any other character\" \\\n \"<br> e.g. _normal01 or in general _normalXX)\" \\\n \"<br>\"\n\n for key, value in txt_naming.iteritems():\n text += \"<br>- {0}: {1}\".format(key, ', '.join(a for a in value['text']))\n\n text += \"<br>\"\n text += \"<br><b>File formats:</b>\"\n text += \"<br>Meshes:\"\n text += ConfigReader.generate_file_filter()\n text += \"<br>Textures:\"\n text += ConfigReader.generate_texture_filter()\n\n return text", "def dump(self):\n\n result = {\n 'size': self.size,\n 'type': self.type,\n 'filename': self.fullpath,\n 'changed': self.changed,\n }\n\n return result", "def image_to_template(filename):\n return _image2template(filename)", "def import_project_dump(self, key):", "def generate_image_info(image):\n image = ee.Image(image)\n image_vis = image.visualize(**{\n 'min': image_min,\n 'max': image_max,\n 'palette': image_palette\n })\n\n print(image_min, image_max)\n\n if 'hillshade' in r and r['hillshade']:\n image_vis = hillshade(image_vis,\n image.subtract(image_min).divide(ee.Image.constant(image_max).subtract(image_min)),\n True)\n\n m = image_vis.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result", "def generate_image_info(im, params):\n im = ee.Image(im)\n\n # some images are scaled to a factor of 10.\n if params.get('scale') == 'log':\n im = im.log10()\n\n im = im.sldStyle(params.get('sld_style'))\n\n m = im.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result", "def get_content(self):\n # =====================================================================\n class NullUndefined(jinja2.Undefined):\n \"\"\"\n Class required to handle 
jinja2-variables inside the meta.yaml\n \"\"\"\n # -----------------------------------------------------------------\n def __unicode__(self):\n return six.text_type(self._undefined_name)\n\n # -----------------------------------------------------------------\n def __getattr__(self, attribute_name):\n return six.text_type(f'{self}.{attribute_name}')\n\n # -----------------------------------------------------------------\n def __getitem__(self, attribute_name):\n return f'{self}[\"{attribute_name}\"]'\n\n\n # =====================================================================\n class StrDict(dict):\n \"\"\"\n Class required to handle jinja2-variables inside the meta.yaml\n \"\"\"\n # -----------------------------------------------------------------\n def __getitem__(self, key, default=''):\n return self[key] if key in self else default\n\n return YAML(typ='base').load(\n (jinja2.Environment(undefined=NullUndefined)\n .from_string(self.path.open().read())\n .render(**dict(os=os,\n environ=StrDict(),\n load_setup_py_data=StrDict))))", "def generate(random, pid, autogen_tools, n):\n\n #Get a random build path\n generator_path = autogen_tools.get_directory(__file__)\n\n rendered_template_path = path.join(generator_path, \"encrypted\")\n\n key = \"xor_20134113\"\n flag = \"flag_\" + sha1((str(n) + key).encode('utf-8')).hexdigest()\n text = xor(\"You accessed all my secrets :(. But the juicy diary entries are in another castle! Here is the flag though: \" + flag, random.randint(0x1,0xff))\n\n with codecs.open(rendered_template_path, 'w', \"utf-8\") as out_file:\n out_file.write(text)\n\n encrypted_link = autogen_tools.generate_resource_link(pid, \"encrypted\", title=\"encrypted\")\n source_link = autogen_tools.generate_resource_link(pid, \"diary.py\", static=True, title=\"script\")\n\n return {\n \"resource_files\": {\n \"public\": [\n (rendered_template_path, \"encrypted\"),\n ],\n },\n \"static_files\": {\n \"public\": [\n (path.join(generator_path,\"diary.py\"), \"diary.py\")\n ]\n },\n \"problem_updates\": {\n \"description\": \"<p>A friend of yours has been using this %s to encrypt his diary. Being the nosy person you are, you must take a look! 
Can you decrypt it?</p><p>%s</p>\" % (source_link, encrypted_link)\n }\n }", "def _info(self):\n text = ''.join(self._lines)\n rendered_text = jinja2.Template(text).render()\n return yaml.load(rendered_text)", "def get_instance_image_info(task):\n ctx = task.context\n node = task.node\n image_info = {}\n # NOTE(pas-ha) do not report image kernel and ramdisk for\n # local boot or whole disk images so that they are not cached\n if (node.driver_internal_info.get('is_whole_disk_image')\n or deploy_utils.get_boot_option(node) == 'local'):\n return image_info\n root_dir = get_http_boot_dir()\n i_info = node.instance_info\n labels = ('kernel', 'ramdisk')\n d_info = deploy_utils.get_image_instance_info(node)\n if not (i_info.get('kernel') and i_info.get('ramdisk')):\n glance_service = service.GlanceImageService(context=ctx)\n iproperties = glance_service.show(d_info['image_source'])['properties']\n for label in labels:\n i_info[label] = str(iproperties[label + '_id'])\n node.instance_info = i_info\n node.save()\n\n for label in labels:\n image_info[label] = (\n i_info[label],\n os.path.join(root_dir, node.uuid, label)\n )\n\n return image_info", "def dump(self):\n cfg = {\n \"Detector\" : \n { \n \"8680\": \n {\n \"CONVENTIONAL\": \n {\n \"1.0\": \n {\n \"1x\": (3.87, 8.51),\n \"2.4x\": (1.6, 6.74),\n \"4.9x\": (0.72, 6.23)\n },\n \n \"3.0\": \n {\n \"1x\": (10.12, 14.07),\n \"2.4x\": (4.2, 11.17),\n \"4.9x\": (1.89, 10)\n }\n \n },\n \n \n \"EMGAIN\": \n {\n \"1.0\" : \n {\n \"1x\": (18.97, 32.25),\n \"2.4x\": (7.61, 19.41),\n \"4.9x\": (3.47, 16.31)\n },\n \n \"3.0\" : \n {\n \"1x\": (46.56, 54.01),\n \"2.4x\": (19.82, 33.3),\n \"4.9x\": (8.84, 26.25)\n },\n \n \"5.0\" : \n {\n \"1x\": (46.49, 70.66),\n \"2.4x\": (19.53, 45.11),\n \"4.9x\": (8.9, 35.87)\n },\n \n \"10.0\" : \n {\n \"2.4x\": (22.45, 52.98),\n \"4.9x\": (10.43, 45.37),\n } \n } \n }, \n \n \"8325\": \n {\n \"CONVENTIONAL\": \n {\n \"1.0\": \n {\n \"1x\": (3.98, 8.64),\n \"2.5x\": (1.6, 6.75),\n \"5.1x\": (0.72, 6.23)\n },\n \n \"3.0\": \n {\n \"1x\": (10.45, 14.42),\n \"2.5x\": (4.14, 10.97),\n \"5.1x\": (1.89, 10.24)\n }\n },\n \n \"EMGAIN\": \n {\n \"1.0\" : \n {\n \"1\": (19.73, 34.13),\n \"2.5x\": (7.88, 20.49),\n \"5.1x\": (3.54, 16.99)\n }, \n \n \"3.0\" : \n {\n \"1\": (48.23, 54.5),\n \"2.5x\": (19.77, 33.41),\n \"5.1x\": (9.04, 27.84)\n },\n \n \"5.0\" : \n {\n \"1\": (50.66, 77.0),\n \"2.5x\": (20.46, 48.08),\n \"5.1x\": (8.7, 35.5)\n },\n \n \"10.0\" : \n {\n \"2.5x\": (22.44, 53.63),\n \"5.1x\": (11.3, 52.55),\n } \n }\n },\n \n \"10522\": \n {\n \"CONVENTIONAL\": \n {\n \"0.1\": \n {\n \"Gain 1\": (3.37, 8.54),\n \"Gain 2\": (0.8, 3.15),\n },\n \n \"1.0\": \n {\n \"Gain 1\": (3.36, 6.51),\n \"Gain 2\": (0.79, 4.59),\n }\n },\n \n \"EMGAIN\": \n {\n \"1.0\" : \n {\n \"Gain 1\": (16.4, 25.1),\n \"Gain 2\": (3.96, 12.3),\n }, \n \n \"10.0\" : \n {\n \"Gain 1\": (16.5, 76.0),\n \"Gain 2\": (4.05, 45.1),\n },\n \n \"20.0\" : \n {\n \"Gain 1\": (17.2, 193.0),\n \"Gain 2\": (4.64, 76.6),\n },\n \n \"30.0\" : \n {\n \"Gain 1\": (18.2, 272.0),\n \"Gain 2\": (5.46, 145.0),\n } \n }\n },\n \n \"10570\": \n {\n \"CONVENTIONAL\": \n {\n \"0.1\": \n {\n \"Gain 1\": (3.31, 8.82),\n \"Gain 2\": (0.8, 3.39),\n },\n \n \"1.0\": \n {\n \"Gain 1\": (3.30, 6.52),\n \"Gain 2\": (0.79, 4.83),\n }\n },\n \n \"EMGAIN\": \n {\n \"1.0\" : \n {\n \"Gain 1\": (16.4, 25.1),\n \"Gain 2\": (4.01, 12.6),\n }, \n \n \"10.0\" : \n {\n \"Gain 1\": (16.5, 85.6),\n \"Gain 2\": (4.06, 42.7),\n },\n \n \"20.0\" : \n {\n \"Gain 1\": (17.5, 142.0),\n \"Gain 
2\": (4.81, 76.0),\n },\n \n \"30.0\" : \n {\n \"Gain 1\": (19.3, 256.0),\n \"Gain 2\": (5.88, 166.0),\n } \n }\n }\n },\n \n \"Phot\" :\n {\n \"fwhmpsf\": 6.0,\n \"sigma\": 10.0,\n \"exposure\": \"EXPTIME\",\n \"calgorithm\": \"centroid\",\n \"cbox\" : 8,\n \"maxshift\": 5, \n \"salgorithm\": \"median\",\n \"annulus\": 14,\n \"dannulus\": 16,\n \"apertures\": 12,\n \"zmag\": 27.11\n }\n }\n \n \n # Dump the configuration to json output file\n with open(self.conf_fname, \"w\") as fd:\n json.dump(cfg, fd) \n \n return", "def daten():\n body_list = db.get_body()\n body_dict = {}\n for body in body_list:\n body_dict[str(body['_id'])] = body['name']\n data_list = []\n for file in os.listdir(app.config['data_dump_folder']):\n if file.endswith(\".tar.bz2\"):\n stat = os.lstat(app.config['data_dump_folder'] + os.sep + file)\n data_list.append({\n 'id': file.split('.')[0],\n 'name': body_dict[file.split('.')[0]],\n 'size': \"%d\" % (stat.st_size / 1024.0 / 1024.0)\n })\n file_list = []\n for file in os.listdir(app.config['files_dump_folder']):\n if file.endswith(\".tar.bz2\"):\n stat = os.lstat(app.config['files_dump_folder'] + os.sep + file)\n file_list.append({\n 'id': file.split('.')[0],\n 'name': body_dict[file.split('.')[0]],\n 'size': \"%d\" % (stat.st_size / 1024.0 / 1024.0 / 1024.0)\n })\n return render_template('daten.html', data_list=data_list, file_list=file_list)", "def generate_registry_operations(data):\n data = render_to_string(TEMPLATE_VC, {\"data\": data})\n open(os.path.join(PLUGINSPACE_TD_VC_DIR, FILENAME_VC), \"w+\").write(data)\n return", "def render_dictionary(self): \n asset_json = {\n 'name': self.name,\n 'product_name': self.product_name,\n 'product_vendor': self.product_vendor,\n 'configuration': self.configuration,\n 'description': self.description,\n 'primary_users': self.primary_users,\n 'primary_voting': self.primary_voting,\n 'secondary_users': self.secondary_users,\n 'secondary_voting': self.secondary_voting,\n 'tags': self.tags,\n 'type': self.asset_type,\n 'action_whitelist': self.action_whitelist\n }\n\n if self.ingest_container_label:\n asset_json['ingest'] = {\n 'container_label': self.ingest_container_label,\n 'interval_mins': self.ingest_interval_mins,\n 'poll': self.ingest_poll,\n 'start_time_epoch_utc': self.ingest_start_time\n }\n\n return asset_json", "def generate_loader_hash(symbols):\n return template_loader_hash % (str(PlatformVar(\"entry\")), len(symbols))", "def get_image_data():\n #mac\n #user_images = [i.replace('static/img/', \"\") for i in glob.glob('static/img/*.png')]\n #pc\n #user_images = [i.replace('static\\\\img\\\\', \"\") for i in glob.glob('static\\\\img\\\\*.png')]\n user_images = [i.replace('static/img/', \"\") for i in glob.glob('static/img/*.png')]\n sports = [inflection.titleize(i.replace('.png', \"\").capitalize().replace(\"_\", \" \")) + \"!\" for i in user_images]\n data = list(zip(sports, user_images))\n return data", "def pics_dict(self):\n\n img_dict = {}\n\n for name, path in zip(ICON_NAMES,ICON_PATHS):\n\n if name == \"main_icon\":\n tk_pic = cGUIf.get_TkImage(path,32,32)\n\n else:\n tk_pic = cGUIf.get_TkImage(path,64,64)\n \n img_dict.update({name : tk_pic})\n\n return img_dict", "def extract_template(temp_dir, fea_type):\n kps = []\n descriptors = np.array([])\n in_path = temp_dir + 'imgs/' # images\n names = os.listdir(in_path)\n for i, name in enumerate(names):\n img = cv2.imread(in_path + name, 0)\n if any(np.array(img.shape) > 1000):\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n 
print(img.shape)\n kp, des = get_des(fea_type, img)\n if descriptors.size == 0:\n kps = kp\n descriptors = des\n else:\n kps.extend(kp)\n descriptors = np.vstack((descriptors, des))\n\n print(\"template descriptors shape: \" + str(descriptors.shape))\n with open(temp_dir + fea_type + '_template_0.pickle', 'wb') as ff:\n pickle.dump(descriptors, ff)\n\n # with open(temp_dir + fea_type + '_template_0.pickle', 'rb') as f:\n # template = pickle.load(f)\n\n return", "def img_map_bg(wts):\n tex = bpy.data.textures[wts.texture]\n image_mapBG = \"\"\n # texture_coords refers to the mapping of world textures:\n if wts.texture_coords in [\"VIEW\", \"GLOBAL\"]:\n image_mapBG = \" map_type 0 \"\n elif wts.texture_coords == \"ANGMAP\":\n image_mapBG = \" map_type 1 \"\n elif wts.texture_coords == \"TUBE\":\n image_mapBG = \" map_type 2 \"\n\n if tex.use_interpolation:\n image_mapBG += \" interpolate 2 \"\n if tex.extension == \"CLIP\":\n image_mapBG += \" once \"\n # image_mapBG += \"}\"\n # if wts.mapping == 'CUBE':\n # image_mapBG += \"warp { cubic } rotate <-90,0,180>\"\n # no direct cube type mapping. Though this should work in POV 3.7\n # it doesn't give that good results(best suited to environment maps?)\n # if image_mapBG == \"\":\n # print(\" No background texture image found \")\n return image_mapBG", "def construct_dicts(self, path):\n module_dicts = self.read_dict(path, use_superpkg=True)\n\n id_dict = dict()\n name_dict = dict()\n\n for cmd_dict in module_dicts:\n # Create a cmd template object\n cmd_temp = cmd_template.CmdTemplate(\n cmd_dict[self.OP_CODE_FIELD],\n cmd_dict[self.MNEMONIC_FIELD],\n cmd_dict[self.COMPONENT_FIELD],\n cmd_dict[self.ARGS_FIELD],\n cmd_dict[self.DESC_FIELD],\n )\n\n id_dict[cmd_dict[self.OP_CODE_FIELD]] = cmd_temp\n name_dict[cmd_dict[self.MNEMONIC_FIELD]] = cmd_temp\n\n return (id_dict, name_dict)", "def as_string(self):\r\n effects_list = self.get_effects()\r\n kwo = {}\r\n\r\n for key in self.keys:\r\n kwo[key] = get_stack(key, effects_list)\r\n\r\n template = self.get_template()\r\n\r\n templated = template.format(**kwo)\r\n\r\n return templated", "def get_default_template(env):\n return env.from_string(\n \"\"\"\\\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }}|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.standard_information.accessed|unixtimestampformat }}|{{ record.standard_information.modified|unixtimestampformat }}|{{ record.standard_information.changed|unixtimestampformat }}|{{ record.standard_information.created|unixtimestampformat }}\n{% endif %}\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }} (filename)|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.filename_information.accessed|unixtimestampformat }}|{{ record.filename_information.modified|unixtimestampformat }}|{{ record.filename_information.changed|unixtimestampformat }}|{{ record.filename_information.created|unixtimestampformat }}\n{% endif %}\n{% for e in record.indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n{% for e in record.slack_indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (slack-INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ 
e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n\"\"\"\n )", "def get_configmap_dict():\n template = textwrap.dedent(\n \"\"\"\n kind: ConfigMap\n apiVersion: v1\n metadata:\n name: fio-config\n data:\n workload.fio: |\n # here comes workload configuration\n \"\"\"\n )\n cm_dict = yaml.safe_load(template)\n return cm_dict", "def template():\n return ENVIVIRTUALIZABLEURI('DEFile')", "def brief_json(self, absolutize_url):\n template = {}\n template.update({\n \"id\": self.image_id,\n \"links\": self.links_json(absolutize_url),\n \"name\": self.name\n })\n return template", "def generate(random, pid, autogen_tools, n):\n\n generator_path = autogen_tools.get_directory(__file__)\n\n template_path = path.join(generator_path, \"code.txt.template\")\n rendered_template_path = path.join(generator_path, \"code.txt\")\n\n autogen_tools.replace_source_tokens(\n template_path,\n {\"flag\": gen_code(n, \"Aviation House\")},\n rendered_template_path\n )\n\n code_link = autogen_tools.generate_resource_link(pid, \"code.txt\", title=\"Encrypted file\")\n\n return {\n \"resource_files\": {\n \"public\": [\n (rendered_template_path, \"code.txt\"),\n ],\n },\n \"static_files\": {\n },\n \"problem_updates\": {\n \"description\": \"<p>We've updated the system to AES. We heard that this is military grade encryption so that should fix everything</p><p>The team have stored the password in %s. Bet you can't get into it</p>\" % code_link\n }\n }", "def __repr__(self) -> str:\n dump_conf = copy.deepcopy(self)\n string = \"\"\n for k in dump_conf:\n v = dump_conf[k]\n if k == \"wpscan_args\":\n v = safe_log_wpscan_args(v)\n if k == \"smtp_pass\" and v != \"\":\n v = \"***\"\n if isinstance(v, (list, dict)):\n v = json.dumps(v)\n else:\n v = str(v)\n string += f\"\\n{k:<25}\\t=\\t{v}\"\n return string", "def _get_imagenet_as_dict(self):\n real_file_path = os.path.realpath(self.map_file)\n if not os.path.exists(real_file_path):\n raise IOError(\"map file {} not exists\".format(self.map_file))\n\n label_dict = {}\n with open(real_file_path) as fp:\n line = fp.readline()\n while line:\n labels = line.split(\" \")\n label_dict[labels[1]] = labels[0]\n line = fp.readline()\n\n # get all the dir which are n02087046, n02094114, n02109525\n dir_paths = {}\n for item in label_dict:\n real_path = os.path.join(self.image_dir, label_dict[item])\n if not os.path.isdir(real_path):\n logger.warning(\"{} dir is not exist\".format(real_path))\n continue\n dir_paths[item] = real_path\n\n if not dir_paths:\n raise PathNotExistsError(\"not valid image dir in {}\".format(self.image_dir))\n\n # get the filename, label and image binary as a dict\n for label in dir_paths:\n for item in os.listdir(dir_paths[label]):\n file_name = os.path.join(dir_paths[label], item)\n if not item.endswith(\"JPEG\") and not item.endswith(\"jpg\"):\n logger.warning(\"{} file is not suffix with JPEG/jpg, skip it.\".format(file_name))\n continue\n data = {}\n data[\"file_name\"] = str(file_name)\n data[\"label\"] = int(label)\n\n # get the image data\n real_file_path = os.path.realpath(file_name)\n image_file = open(real_file_path, \"rb\")\n image_bytes = image_file.read()\n image_file.close()\n if not image_bytes:\n logger.warning(\"The image file: {} is invalid.\".format(file_name))\n continue\n data[\"image\"] = image_bytes\n yield data", "def to_json(self):\n template = {\n \"tensorName\": self.title,\n \"tensorShape\": list(self.vector_shape),\n 
\"tensorPath\": self.vector_url,\n \"metadataPath\": self.metadata_url,\n }\n if self.sprite_url is not None:\n template[\"sprite\"] = {\n \"imagePath\": self.sprite_url,\n \"singleImageDim\": list(self.image_size),\n }\n return template", "def to_dict(self) -> dict:\n return {\n \"name\": self.package_name,\n \"mpy_version\": self.mpy_version,\n \"publish\": self._publish,\n \"pkg_version\": str(self.pkg_version),\n \"path\": self.package_path.name, # only store the folder name , as it is relative to the publish folder\n \"stub_sources\": [(name, Path(path).as_posix()) for (name, path) in self.stub_sources],\n \"description\": self.description,\n \"hash\": self.hash,\n \"stub_hash\": self.stub_hash,\n }", "def s2etyrec_image(node, key_image, _paren_if_fun, paren_if_app):\n (key, image) = key_image # `image` is a function.\n assert key == t.S2EXP_NODE or key == t.S2EXP_SRT\n kind_tag = node[0]\n flat = True\n if t.TYRECKINDBOX in kind_tag or t.TYRECKINDBOX_LIN in kind_tag:\n flat = False\n labels = node[2]\n result = \"\"\n if paren_if_app:\n result += \"(\"\n if flat:\n result += \"@{\"\n else:\n result += \"'{\"\n first = True\n for label in labels:\n if not first:\n result += \", \"\n label = label[t.SL0ABELED]\n label_name = label[0]\n if t.LABINT in label_name:\n result += str(label_name[t.LABINT])\n elif t.LABSYM in label_name:\n result += label_name[t.LABSYM]\n else:\n error(\"Unknown label name type\")\n result += \"=\"\n result += image(label[2][key])\n first = False\n result += \"}\"\n if paren_if_app:\n result += \")\"\n return result", "def make_image(self, **kwargs):\n image = dict(self.BASE_EMR_IMAGE, **kwargs)\n\n return {k: v for k, v in image.items() if v is not None}", "def create_template_dict(name, cat, boilerplate_name=None, is_common=False):\r\n return {\r\n \"display_name\": name,\r\n \"category\": cat,\r\n \"boilerplate_name\": boilerplate_name,\r\n \"is_common\": is_common\r\n }", "def __repr__(self):\n template = \"{}(confetti_key='{}', confetti_app='{}', \" \\\n \"confetti_path='{}', session={}, recursive={})\"\n return template.format(\n self.__class__.__name__,\n self.confetti_key,\n self.confetti_app,\n self.confetti_path,\n self.session,\n self.recursive,\n )", "def _get_config_template(self, key):\n tmp_path = self._get_config_value('templates', 'path') + key\n return tmp_path", "def create_html(pic_info,sum_pic,upload_path,yun_link=('1','2')):\n save_file=pic_info+'.txt'\n content=\"\"\"\n <p>\n <img src=\"%s%s\" style=\"\" title=\"%s\"/>\n </p>\n <p>\n <img src=\"%s%s\" style=\"\" title=\"%s\"/>\n </p>\n <p>\n <img src=\"%s%s\" style=\"\" title=\"%s\"/>\n </p>\n <p>\n <img src=\"%s%s\" style=\"\" title=\"%s\"/>\n </p>\n <p>\n <span style=\"color: #FF0000; font-size: 24px;\">link: \n </span>\n <a href=\"%s\" target=\"_blank\" \n style=\"font-size: 24px; text-decoration: underline;\">\n <span style=\"font-size: 24px;\">%s\n </span>\n </a> \n <span style=\"font-size: 24px;\">\n <span style=\"color: #FF0000; font-size: 24px;\">code:\n </span>\n %s\n </span>\n </p>\\n\\n\\n\\n\\n\\n\\n\\n\\n\n \"\"\"%(upload_path,sum_pic[0],sum_pic[0],upload_path,sum_pic[1],sum_pic[1],\n upload_path,sum_pic[2],sum_pic[2],upload_path,sum_pic[3],sum_pic[3],\n yun_link[0],yun_link[0],yun_link[1])\n with open(save_file, 'w') as f:\n f.write(content)\n f.close()", "def image_capture_demo():\n return render_template('image_capture_demo.html')", "def __serialize__(self):\n return {\"_custom_type\" : self.__class__.__name__,\n \"name\" : self.name,\n \"src\" : self.src,\n \"exec_loc\" 
: self.exec_loc,\n \"precompiled\" : self.precompiled}", "def __str__(self):\n gppkg_spec_file = '''\nPkgName: ''' + self.name + '''\nVersion: ''' + self.version + '''\nGPDBVersion: ''' + self.gpdbversion + '''\nDescription: Temporary Test Package\nOS: ''' + self.os + '''\nArchitecture: ''' + self.arch\n\n return gppkg_spec_file", "def get_tool_config(template_file: Path, src: Path) -> dict:\n copyright_dates = _determines_copyright_dates()\n return {\n \"owner\": configuration.get_value(ConfigurationVariable.ORGANISATION),\n \"dir\": src,\n \"projname\": configuration.get_value(ConfigurationVariable.PROJECT_NAME),\n \"tmpl\": str(template_file),\n \"years\": copyright_dates,\n \"additional-extensions\": ADDITIONAL_EXTENSIONS,\n \"exclude\": FILES_TO_IGNORE,\n }", "def get_hash():\n return render(build_hash('command'),False)", "def createNames(pathImg=\"cubes/img\"):\r\n\tobj_name={}\r\n\tfor i in range(10):\r\n\t\tpath =pathImg+chr(47)+str(i)\r\n\t\tpng_name=[]\r\n\t\tfiles = os.listdir(path)\r\n\t\tfor name in files:\r\n\t\t\tpng_name.append(name.replace(\".png\",\"\"))\r\n\t\tobj_name[i]=png_name\r\n\treturn obj_name", "def _create_manifest(self, templates_dir, static_dir):\n return \"\"\"\n graft %(templates_dir)s\n graft %(static_dir)s\n\n include COPYING\n include INSTALL\n include README.md\n include *-requirements.txt\n\n global-exclude .*.sw[op] *.py[co] __pycache__ .DS_Store .noseids\n \"\"\" % {\n 'templates_dir': templates_dir,\n 'static_dir': static_dir,\n }", "def image(self):\n return \"{}/{}:{}\".format(\n self.namespace,\n self.name,\n self.build.commit.tag if self.build.commit.tag\n else self.build.commit.commit_hash\n )", "def verifyBootImage( template, sztpBootImage ):\n def verifyImageVerification( imageVerification ):\n \"\"\"Verify instance of image-verification is correct\"\"\"\n if \"hash-algorithm\" in imageVerification:\n assert imageVerification[ \"hash-algorithm\" ] == \\\n \"ietf-sztp-conveyed-info:sha-256\",\\\n \"Unsupported hash-algorithm\"\n assert \"hash-value\" in imageVerification, \\\n \"Expected hash-value not present\"\n hashValue = imageVerification[ \"hash-value\" ]\n # Verify hashValue appears to be a yang:hex-string\n assert len( hashValue ) == 32 * 3 - 1 and \\\n all( c == ':' or c in string.hexdigits for c in hashValue ), \\\n \"hash-value invalid\"\n\n def verifyImageVerificationList( template, sztpImageVerification ):\n \"\"\"Verify image-verification list is correct\"\"\"\n assert isinstance( sztpImageVerification, list ), \\\n \"Expected list\"\n for imageVer in sztpImageVerification:\n assert verifyDictTypes( template, imageVer ), \"Unexpected value types\"\n assert set( imageVer.keys() ).issubset( set( template.keys() ) ), \\\n \"Unexpected keys in dict\"\n verifyImageVerification( imageVer )\n\n mandatory = [ \"download-uri\" ]\n assert isinstance( sztpBootImage, dict ), \"Expected dict\"\n assert set( sztpBootImage.keys() ).issubset( template.keys() ), \\\n \"Unexpected keys in dict\"\n assert verifyDictTypes( template, sztpBootImage ), \\\n \"Unexpected value types\"\n assert set( mandatory ).issubset( sztpBootImage ), \\\n \"Mandatory keys not present\"\n if \"image-verification\" in sztpBootImage:\n verifyImageVerificationList( template[ \"image-verification\" ][ 0 ],\n sztpBootImage[ \"image-verification\" ] )", "def GetResources(self):\n dirn = os.path.dirname(os.path.dirname(__file__))\n icon = os.path.join(dirn, 'resources', 'EnVis_ifc_open.svg')\n\n return {'Pixmap': icon,\n 'MenuText': QT_TRANSLATE_NOOP(\"EnVis_Import\",\n 
\"Import IFC file\"),\n 'ToolTip': QT_TRANSLATE_NOOP(\"EnVis_Import\",\n \"Import IFC Elements useful for energy calculations\")}", "def gen_file():\n content = clean(read_file())\n content += PREFIX\n instances = ec2.instances.filter(Filters=[{\"Name\": \"instance-state-name\", \"Values\": [\"running\"]}])\n for instance in instances:\n private_dns_name = instance.private_dns_name\n private_hostname = instance.private_dns_name.split('.')[0]\n if instance.public_ip_address:\n content += \"{} {} {}\\n\".format(instance.public_ip_address.ljust(15), private_dns_name, private_hostname)\n content += SUFFIX + \"\\n\"\n return content", "def gen_otter_file(notebook_path):\n config = {}\n\n service = Exam.config.get('service', {})\n if service:\n config.update({\n \"endpoint\": service[\"endpoint\"],\n \"auth\": service.get(\"auth\", \"google\"),\n \"assignment_id\": service[\"assignment_id\"],\n \"class_id\": service[\"class_id\"]\n })\n\n config[\"notebook\"] = service.get('notebook', notebook_path.name)\n config[\"save_environment\"] = Exam.config.get(\"save_environment\", False)\n config[\"ignore_modules\"] = Exam.config.get(\"ignore_modules\", [])\n\n if Exam.config.get(\"variables\", None):\n config[\"variables\"] = Exam.config.get(\"variables\")\n\n config_path = notebook_path.with_suffix('.otter')\n with open(config_path, \"w+\") as f:\n json.dump(config, f, indent=4)", "def get_configuration_template(self):\n return {'EXAMPLE_KEY_1': \"Example value\",\n 'EXAMPLE_KEY_2': [\"Example\", \"Value\"]\n }", "def getList(self):\n labelMap = {}\n imageMap = {}\n key = []\n index = 0\n\n for root, dirs, files in os.walk(self.path_data):\n for file in files:\n # If .png or .jpg file found then\n if file.endswith(tuple(config.imageFormat)):\n key.append(index)\n labelMap[index] = preprocessing.getLabel(file)\n imageMap[index] = os.path.join(root, file)\n\n index += 1\n\n else:\n continue\n\n return key, imageMap, labelMap", "def get_crud_template_dict():\n return CRUD_TEMPLATE_DICT", "def settings_information():\n return {\n \"version\": VERSION,\n \"modules_directory\": MODULES_DIR,\n \"web_directory\": WEB_DIR,\n \"dependencies_directory\": DEPENDENCIES_DIR,\n \"bot_directory\": BOT_DIR,\n \"bot_data_directory\": BOT_DATA_DIR,\n \"bot_image_directory\": BOT_IMAGE_DIR,\n \"local_data_directory\": LOCAL_DATA_DIR,\n \"local_data_database_directory\": LOCAL_DATA_DB_DIR,\n \"local_data_log_directory\": LOCAL_DATA_LOG_DIR,\n \"local_data_backup_directory\": LOCAL_DATA_BACKUP_DIR,\n \"database_name\": DB_NAME,\n \"database_file\": DB_FILE,\n \"authentication_base_url\": AUTH_BASE_URL,\n \"authentication_auth_url\": AUTH_AUTH_URL,\n \"tesseract_dependency_directory\": TESSERACT_DEPENDENCY_DIR,\n \"tesseract_directory\": TESSERACT_DIR,\n \"tesseract_path\": TESSERACT_PATH,\n }", "def get_configuration_template(self):\n return {'EXAMPLE_KEY_1': \"Example value\",\n 'EXAMPLE_KEY_2': [\"Example\", \"Value\"]\n }", "def render_template(main_module_path: str,\n main_module_name: str,\n language: str,\n requirements: List[str],\n whitelist: List[str],\n adapter_type: str,\n into):\n\n dockerfile_template = jinja2.Template(\n open('/adama/containers/Dockerfile.adapter').read())\n requirement_cmds = (\n 'RUN ' + requirements_installer(language, requirements)\n if requirements else '')\n\n dockerfile = dockerfile_template.render(\n main_module_path=main_module_path,\n main_module_name=main_module_name,\n language=language,\n requirement_cmds=requirement_cmds,\n whitelist=json.dumps(whitelist),\n 
adapter_type=adapter_type\n )\n with open(os.path.join(into, 'Dockerfile'), 'w') as f:\n f.write(dockerfile)", "def image(self): # type: () -> str\n return self.config['Image']", "def render_application_template(self):\n self.pipeline_config['instance_links'] = self.retrieve_instance_links()\n jsondata = get_template(\n template_file='infrastructure/app_data.json.j2', appinfo=self.appinfo, pipeline_config=self.pipeline_config)\n return jsondata" ]
[ "0.5737873", "0.55475605", "0.5534782", "0.5529993", "0.55225337", "0.55086523", "0.5487717", "0.54603654", "0.545359", "0.54374003", "0.53441983", "0.53371257", "0.5325836", "0.5310178", "0.530841", "0.5289318", "0.5286247", "0.52432984", "0.5220684", "0.5216042", "0.5184362", "0.5157884", "0.5150819", "0.5148567", "0.514077", "0.5132544", "0.51256675", "0.5119062", "0.51017666", "0.5085865", "0.5078005", "0.5077131", "0.5067781", "0.5061879", "0.505137", "0.50483096", "0.50476044", "0.5028841", "0.50257254", "0.50230336", "0.50172514", "0.4990034", "0.49865264", "0.4967986", "0.49660137", "0.4964454", "0.49624893", "0.49624145", "0.49542904", "0.4932492", "0.4920227", "0.4911862", "0.49104717", "0.48980546", "0.4896835", "0.4890602", "0.4885593", "0.4883157", "0.48800558", "0.48774394", "0.48762146", "0.48752365", "0.48743352", "0.48679316", "0.4866277", "0.4855211", "0.48464733", "0.4844375", "0.48363003", "0.48361102", "0.4830432", "0.48276713", "0.48257038", "0.48191246", "0.48155633", "0.48154318", "0.48136222", "0.48088604", "0.4796984", "0.47957832", "0.4795031", "0.47938603", "0.47899622", "0.4789873", "0.47748813", "0.47685382", "0.4765456", "0.47644907", "0.47605905", "0.47496837", "0.47453827", "0.4738789", "0.47385842", "0.47348234", "0.47301397", "0.47280422", "0.47224003", "0.47185305", "0.47176784", "0.47174588" ]
0.7714567
0
Update the doubleQ method, i.e. make sure to select actions a' using self.Q but evaluate the Q-values using the target network (see slides). In other words, > self.target(s) is a Q-function network which evaluates
def experience_replay(self):
    s,a,r,sp,done = self.memory.sample(self.batch_size)
    # TODO: 5 lines missing.
    raise NotImplementedError("")
    self.Q.fit(s, target=target)
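Editorial aside, not part of the dataset record above or below: the query describes the double-Q trick of selecting the next action a' with the online network self.Q while scoring that action with the target network self.target. The sketch below is one minimal way to build such regression targets with NumPy; the helper name double_q_targets, the array shapes, the gamma default, and the assumed call interfaces of self.Q and self.target are illustrative assumptions, not taken from this dataset.

import numpy as np

def double_q_targets(q_s, q_sp_online, q_sp_target, a, r, done, gamma=0.99):
    # q_s:         online-network Q-values for the sampled states s, shape (B, num_actions)
    # q_sp_online: online-network Q-values for the next states sp,   shape (B, num_actions)
    # q_sp_target: target-network Q-values for the next states sp,   shape (B, num_actions)
    # a, r, done:  actions taken, rewards and terminal flags,        shape (B,)
    a = np.asarray(a, dtype=int)
    not_done = 1.0 - np.asarray(done, dtype=float)
    B = len(a)
    a_prime = np.argmax(q_sp_online, axis=1)       # select a' using the online network (self.Q)
    q_prime = q_sp_target[np.arange(B), a_prime]   # evaluate those a' using the target network (self.target)
    target = np.array(q_s, dtype=float)            # start from current estimates; only taken actions change
    target[np.arange(B), a] = np.asarray(r, dtype=float) + gamma * not_done * q_prime
    return target

Inside experience_replay the five missing lines would, under these assumptions, amount to computing q_s = self.Q(s), q_sp_online = self.Q(sp) and q_sp_target = self.target(sp), building target = double_q_targets(q_s, q_sp_online, q_sp_target, a, r, done, gamma) and then calling self.Q.fit(s, target=target) as in the snippet above.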
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def learn(self):\n if self.learn_step_counter % self.target_q_update_step == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict()) #update target_net's parameters\n logging.info(\"updtate target q\")\n self.learn_step_counter += 1\n\n rgbs,depths, rgbs_1, depths_1,questions,actions,rewards,terminals = self.memory.sample()\n\n rgbs_var = Variable(torch.FloatTensor(rgbs).cuda())\n depths_var = Variable(torch.FloatTensor(depths).cuda())\n rgbs_1_var = Variable(torch.FloatTensor(rgbs_1).cuda())\n depths_1_var = Variable(torch.FloatTensor(depths_1).cuda())\n questions_var = Variable(torch.LongTensor(questions).cuda())\n actions_var = Variable(torch.LongTensor(actions).cuda())\n rewards_var = Variable(torch.FloatTensor(rewards).cuda())\n terminals_var = Variable(torch.FloatTensor(terminals).cuda())\n\n q_eval_matrix = self.eval_net(rgbs_var,depths_var,questions_var)\n q_eval_matrix = q_eval_matrix.view(-1,9*28*28)\n actions_var = actions_var.view(-1,1)\n q_eval = torch.gather(q_eval_matrix, 1, actions_var) \n q_eval = q_eval.squeeze(1)\n\n q_next_matrix = self.target_net(rgbs_1_var,depths_1_var,questions_var).detach() #don't backward\n q_next_matrix = q_next_matrix.view(-1,9*28*28)\n q_next = torch.max(q_next_matrix,1)[0]\n\n one_var = Variable(torch.ones_like(terminals_var))\n\n q_target = rewards_var + (one_var- terminals_var)*self.discount * q_next\n \n loss = self.loss_func(q_eval, q_target)\n\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n self.task_total_loss += loss.item()\n self.task_total_q += q_target.mean()\n self.update_count += 1", "def qUpdate(self,state,action,reward,next_state):\r\n #get delta\r\n \r\n #delta = reward + self.gamma * self.Q(next_state,next_action) \\\r\n # - self.Q(state,action)\r\n \r\n #get e update\r\n #self.e = self.gamma *self.lam * self.e - self.grad(state,action)\r\n \r\n \r\n #do update to w\r\n \r\n #self.w = self.alpha * delta * self.e\r\n #get difference between current q and new q\r\n \r\n delta = reward + self.gamma * self.maxQ(next_state)[0] - \\\r\n self.Q(state,action) \r\n #update w\r\n self.w = self.w + self.alpha * delta * self.grad(state,action)", "def update(Q, target_Q, opt, samples, gamma=0.99, target_type='double_dqn'):\n xp = Q.xp\n obs = xp.asarray([sample[0] for sample in samples], dtype=np.float32)\n action = xp.asarray([sample[1] for sample in samples], dtype=np.int32)\n reward = xp.asarray([sample[2] for sample in samples], dtype=np.float32)\n done = xp.asarray([sample[3] for sample in samples], dtype=np.float32)\n obs_next = xp.asarray([sample[4] for sample in samples], dtype=np.float32)\n # Predicted values: Q(s,a)\n y = F.select_item(Q(obs), action)\n # Target values: r + gamma * max_b Q(s',b)\n with chainer.no_backprop_mode():\n if target_type == 'dqn':\n next_q = F.max(target_Q(obs_next), axis=1)\n elif target_type == 'double_dqn':\n next_q = F.select_item(target_Q(obs_next),\n F.argmax(Q(obs_next), axis=1))\n else:\n raise ValueError('Unsupported target_type: {}'.format(target_type))\n target = reward + gamma * (1 - done) * next_q\n loss = mean_clipped_loss(y, target)\n Q.cleargrads()\n loss.backward()\n opt.update()", "def get_target_q_values(\n self, next_states: torch.Tensor, rewards: torch.Tensor, dones: torch.Tensor\n ) -> torch.Tensor:\n return ddqn_q_target(self, next_states, rewards, dones)", "def update_target(self):\n with torch.no_grad():\n for target_q_param, q_param in zip(self.target_q_funcs.parameters(), self.q_funcs.parameters()):\n target_q_param.data.copy_(self.tau * 
q_param.data + (1.0 - self.tau) * target_q_param.data)\n for target_pi_param, pi_param in zip(self.target_policy.parameters(), self.policy.parameters()):\n target_pi_param.data.copy_(self.tau * pi_param.data + (1.0 - self.tau) * target_pi_param.data)", "def update_target_q_network(self):\n assert self.target_network != None\n self.target_network.run_copy()", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use Q-Learning algoritm in slide 58 of MDP\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n maxQns = self.getValue(nextState) # get max q-value of next state\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action) #self.qValues[(state, action)]\n difference = reward + self.discountRate * maxQns - Qsa\n self.qValues[(state, action)] += self.alpha * difference\n \n self.vitCount[(state, action)] += 1\n \"\"\" END CODE \"\"\"", "def update_q_values(self, state, action, next_state, reward, done):\n # Following the Q-Learning update rule\n if done:\n self.Q_values[state,action] = (1 - self.alpha) * self.Q_values[state,action] + self.alpha * (reward)\n else:\n self.Q_values[state,action] = (1 - self.alpha) * self.Q_values[state,action] + self.alpha * (reward + self.gamma * np.amax(self.Q_values[next_state]))", "def update1(self, state, action, nextState, reward):\n #print \"update1 in ApproximateQAgent\"\n \"*** YOUR CODE HERE ***\"\n ##################################################################################################################################Eric Did Stuff\n actionList = nextState.getLegalActions(self.index)\n\n\n #print \"Action List\", actionList\n\n\n\n\n weights = self.getWeights()\n\n features = self.featExtractor.getFeatures(state, action, self)\n #self.myFeats = features\n if self.index == 0:\n print \"FEATURES: \",features\n value = self.computeValueFromQValues(nextState)\n qValue = self.getQValue(state,action)\n #print \"value\", value, \"qValue\", qValue\n for feature in features:\n if len(actionList) != 0:\n weights[feature] = weights[feature] + self.alpha * (reward + self.discount * value - qValue) * features[feature]\n else:\n weights[feature] = weights[feature] + self.alpha * (reward - qValue) * features[feature]\n #print \"feature\", feature, \"weights\", weights[feature]\n #print \"weights\", weights\n\n #util.raiseNotDefined()", "def update_target_network(self):\n self.target_Qmodel = clone_model(self.Qmodel)\n self.target_Qmodel.set_weights(self.Qmodel.get_weights())\n\n # target network is never compiled\n self.target_Qmodel.compile(loss='mse', optimizer=Adam())", "def call_Q(self, state_vector, update=None, action_taken=None, target=None, loss_weights=None):\n\n\t\t# This corresponds to moving as many armies as possible\n\t\taction = 1\n\n\t\treturn action", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n Q_Value = self.Q #calling constructor\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n\n Q_Value[(state,action)] = ((1-learning_rate) * temporary_QValue) + (learning_rate * (reward + discount_factor * nextState_QValue)) #for formula go to README_Reinforcement.txt at line 8\n\n #util.raiseNotDefined()", "def _update_q_value(self, start_state, to_state, reward, iteration):\n if 
start_state.as_tuple() not in self.qstore.q:\n self.enum.enumerate_state(start_state, self.qstore.q)\n if to_state.as_tuple() not in self.qstore.q:\n self.enum.enumerate_state(to_state, self.qstore.q)\n\n actions = self.qstore.q[start_state.as_tuple()]['actions']\n values = self.qstore.q[start_state.as_tuple()]['utilities']\n\n max_over_next_states = max(self.qstore.q[to_state.as_tuple()]['utilities']) if to_state.terminate != 1 else 0\n\n action_between_states = to_state.as_tuple()\n\n action_index = actions.index(action_between_states)\n learning_rate_alpha = 1 / (iteration ** self.state_space_parameters.learning_rate_omega)\n\n # Q_Learning update rule\n values[action_index] = ( # Q_t+1(s_i,𝑢) =\n values[action_index] + # Q_t(s_i,𝑢)\n learning_rate_alpha * ( # α\n reward # r_t\n + self.state_space_parameters.discount_factor # γ\n * max_over_next_states # max_{𝑢'∈ 𝒰(s_j)} Q_t(s_j,𝑢')\n - values[action_index] # -Q_t(s_i,𝑢)\n )\n )\n\n self.qstore.q[start_state.as_tuple()] = {'actions': actions, 'utilities': values}", "def update_Q(self, reward):\n old_estimate = self.q_estimates[self.prev_action]\n self.q_estimates[self.prev_action] = old_estimate + 1/self.N[self.prev_action] * (reward - old_estimate)", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def update_Q(self):", "def update_Q(self, state, action, new_state, reward):\n \n self.Q[(state, action)] = self.Q[(state, action)] + self.alpha*(reward + self.gamma * self.max_value(new_state) - self.Q[(state, action)])", "def second_q_ops(self):\n raise NotImplementedError()", "def update_Q(self, state, action, new_state, new_action, reward):\n \n self.Q[(state, action)] = float(self.Q[(state, action)] + self.alpha*(reward + self.gamma * self.Q[new_state, new_action] - self.Q[(state, action)]))", "def _transition_q_learning(self):\n if self.state.as_tuple() not in self.qstore.q:\n self.enum.enumerate_state(self.state, self.qstore.q)\n\n action_values = self.qstore.q[self.state.as_tuple()]\n # epsilon greedy choice\n if np.random.random() < self.epsilon:\n action = State(*action_values['actions'][np.random.randint(len(action_values['actions']))])\n else:\n max_q_value = max(action_values['utilities'])\n max_q_indexes = [i for i in range(len(action_values['actions'])) if\n action_values['utilities'][i] == max_q_value]\n max_actions = [action_values['actions'][i] for i in max_q_indexes]\n action = State(*max_actions[np.random.randint(len(max_actions))])\n\n self.state = action.copy()\n\n self._post_transition_updates()", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n #simple implementation of a python noob to implement DDQN\n bla = torch.from_numpy(np.zeros(64)).float().to(device)\n for i in range(64):\n bla[i] = self.qnetwork_target(next_states[i]).detach()[self.qnetwork_local(next_states).detach().argmax(1)[i]]\n Q_targets_next = bla.unsqueeze(1)\n #this was my first try of ddqn in python style, but as i said i'm a noob and didn't get it working\n #Q_targets_next = [self.qnetwork_target(next_states).detach()[i] for i in self.qnetwork_local(next_states).detach().argmax(1).unsqueeze(1)]\n \n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n 
loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n self.qValues[(state, action)] = ((1 - self.alpha) * self.getQValue(state, action)) + self.alpha \\\n * (reward + self.discount * self.computeValueFromQValues(nextState))", "def act(self, state_and_prev_recurrent, eps=0.):\n state_and_prev_recurrent = torch.from_numpy(state_and_prev_recurrent).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state_and_prev_recurrent)[:, :4]\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy())\n else:\n return random.choice(np.arange(self.action_size))", "def loss_function(self, q_vals, next_q_vals, rewards, actions, double_q_vals=None):\n with self.graph.as_default():\n with tf.name_scope('loss'):\n \"\"\"\n Calculate the target value(s)\n \"\"\"\n if double_q_vals is not None:\n # Select maximizing action using online network\n max_index = tf.argmax(double_q_vals, axis=1, output_type=tf.int32)\n indices = tf.stack([tf.range(0,self.batch_size), max_index], axis=-1)\n # Evaluate Q using target network\n next_q_acted = tf.gather_nd(next_q_vals, indices)\n else:\n # Select the maximum value of the next_q_vals: max_a Q(s_t+1,a)\n next_q_acted = tf.reduce_max(next_q_vals, axis=1)\n # y = r + gamma * max Q(s_t+1)\n target = tf.add_n([rewards, tf.scalar_mul(self.gamma, next_q_acted)], name='target_values')\n \"\"\"\n Retrieve the Q-value(s) of the given actions\n \"\"\"\n # Q(s_t,a_t)\n indices = tf.stack([tf.range(0,self.batch_size), actions], axis=-1)\n q_acted = tf.gather_nd(q_vals, indices)\n \"\"\"\n Calculate the loss: squared TD-error\n \"\"\"\n # This is the TD-error: y - Q(s_t,a_t)\n diff = tf.subtract(target, q_acted, name='TD_errors')\n # reduce-mean averages the negative and positive td-errors\n td_loss = tf.square(diff, name='squared_TD_errors')\n loss = tf.reduce_mean(td_loss)\n # Squared_TD_errors is the mean-squared-loss we want to minimize in training\n\n return loss, diff", "def update_Q(self, state, action, reward, new_state = None):\n Q_val = self.Q[state][action]\n \n # Look at the best action from the next state.\n Qp_val = 0\n if new_state is not None:\n Qp_val = max(self.Q[new_state].values())\n \n # The famous formula:\n Q_val = Q_val + self.alpha * (reward + self.gamma * Qp_val - Q_val)\n #print self.alpha\n #print state, 'action: ', action\n #print 'Q[%s]: %s' % (state, self.Q[state])\n #print 'Q val: ', Q_val\n \n self.Q[state][action] = Q_val\n \n return None", "def update(self):\n result = [], 0, False\n\n if self.t % self.t_train_freq == 0:\n result = self.q_learning_minibatch()\n\n if self.t % self.t_target_q_update_freq == self.t_target_q_update_freq - 1:\n # Copy \n self.update_target_q_network()\n\n return result", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n Q_Value = 0 #initializing q value\n\n feat_Extractor = self.featExtractor\n\n weight = self.weights #To get the weight to control exploration and exploitation\n\n features = feat_Extractor.getFeatures(state,action) #to get all the features associated with (state,action) pair\n\n for each_feature in features:\n #refer to README_Reinforcement.txt for the formula at line 11\n temp_Qvalue = weight[each_feature] * 
features[each_feature] #Q(state,action) = w * featureVector where * is the dotProduct operator\n Q_Value = Q_Value + temp_Qvalue\n\n return Q_Value #Returns final qvalue\n #util.raiseNotDefined()", "def test_double_dqn(self):\n model = DoubleDQNLightning(self.hparams)\n result = self.trainer.fit(model)\n\n self.assertEqual(result, 1)", "def update_qvals(self, state, action, reward):\n self.qvals[(state, action)] = 0", "def update_target_dqn(self):\n\n for learning_parameter in self.dqn.learning_parameters:\n dqn_value = self.dqn.get_value(learning_parameter, self.tf_session)\n if(dqn_value is not None):\n self.target_dqn.set_value(\n learning_parameter, dqn_value, self.tf_session)\n else:\n print(\"Impossible to set value: None\")", "def q1_forward(self, state: torch.Tensor) -> torch.Tensor:\n return self.q_networks[0](state)", "def updateQValue(self, state, action, old_q, reward, future_rewards):\n self.q[(tuple(state), action)] = old_q + self.alpha * (reward + future_rewards - old_q)", "def learn(self):\n ## obtain sample batch using priority based sampling.\n states, actions, rewards, next_states, dones, weights, sample_inds = self.buffer.sample_batch(BETA)\n \n ## obtain the discounted sum of rewards from reward list\n ## also obtain final gamma multiplier\n reduced_rewards, gamma_multipliers = self.reduce_rewards(rewards)\n \n ## convert to tensors\n states = np_to_tensor(states)\n actions = np_to_tensor(actions)\n reduced_rewards = np_to_tensor(reduced_rewards)\n gamma_multipliers = np_to_tensor(gamma_multipliers)\n next_states = np_to_tensor(next_states)\n dones = np_to_tensor(dones)\n weights = np_to_tensor(np.array(weights))\n \n #### Updating Qnet\n \n ## actions from the target actor network\n greedy_actions = self.actor_target(next_states)\n ## compute temporal difference\n targets = reduced_rewards + torch.mul( torch.mul(gamma_multipliers , self.QNetwork_target(next_states, greedy_actions)) , (1-dones).unsqueeze(1))\n Q_sa = self.QNetwork_local(states, actions)\n \n td_error = targets - Q_sa\n \n ## update the priorities using temporal differences\n self.buffer.update_priority(sample_inds,\n (td_error).detach().abs().squeeze().cpu().data.numpy()+REPLAY_EPS)\n \n ## compute the loss, importance sampling weights are used\n loss = ((td_error).pow(2)*weights).mean()\n \n self.QNet_optim.zero_grad()\n loss.backward()\n self.QNet_optim.step()\n \n ### Updating Actor\n pred_actions = self.actor_local(states)\n actor_loss = - self.QNetwork_local(states, pred_actions).mean()\n \n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n \n #### Polyak Updates\n self.soft_update(self.QNetwork_local, self.QNetwork_target, TAU)\n self.soft_update(self.actor_local, self.actor_target, TAU)", "def get_q_values(self, state, network):\n out = None\n state = state.permute(0, 3, 1, 2)\n #pdb.set_trace()\n ##############################################################\n ################ YOUR CODE HERE - 4-5 lines lines ################\n if network == 'q_network':\n out = self.q_network(state)\n else:\n out = self.target_network(state)\n ##############################################################\n ######################## END YOUR CODE #######################\n return out", "def batch_q_learning(self):\n\n if(self.memory.get_usage() > Parameters.AGENT_HISTORY_LENGTH):\n\n state_t, action, reward, state_t_plus_1, terminal, i_s_weights, memory_indices = self.memory.bring_back_memories()\n\n q_t_plus_1 = self.tf_session.run(\n self.target_dqn.q_values, {\n 
self.target_dqn_input: state_t_plus_1})\n max_q_t_plus_1 = np.max(q_t_plus_1, axis=1)\n\n target_q_t = (1. - terminal) * \\\n Parameters.DISCOUNT_FACTOR * max_q_t_plus_1 + reward\n\n _, q_t, losses = self.tf_session.run([self.dqn.optimize, self.dqn.q_values, self.dqn.errors],\n {\n self.dqn.target_q: target_q_t,\n self.dqn.action: action,\n self.dqn_input: state_t,\n self.dqn.i_s_weights: i_s_weights\n })\n\n self.memory.update(\n memory_indices,\n np.squeeze(q_t),\n losses,\n self.get_learning_completion())\n input_shape = (1, Parameters.IMAGE_HEIGHT, Parameters.IMAGE_WIDTH, Parameters.AGENT_HISTORY_LENGTH)\n dqn_input = self.environment.get_input().reshape(input_shape)\n q_values = self.tf_session.run(\n self.dqn.q_values, {\n self.dqn_input: dqn_input})\n Plotter.add_q_values_at_t(q_values)\n else:\n print('[WARNING] Not enough memory for a batch')", "def Q_net(self, state):\n\t\tif not self._prediction_made: \n\t\t\tQ = tf.matmul(tf.nn.relu( tf.matmul(state, self.weights_hidden) + self.bias_hidden ), self.weights_out) + self.bias_out \n\t\t\tself._Qval = Q\t\n\t\t\tself._prediction_made = True\n\t\treturn self._Qval", "def updateQTable( self, reward, current_state ):", "def act(self, q_values, *args, **kwargs):\n pass", "def forward(self, x1, x2):\n return x1 * self.Q + (1 - self.Q) * x2", "def update_Q(self, state, action, reward, state_prime):\n # print \"Updating Q MATRIX\"\n\n # Q(s,a) = (1- alpha)*Q(s,a) + alpha*(reward + gamma * max_Q(s', a'))\n\n # Init value if it doesn't exist: Q(self.state, self.action) = 0\n if (state, action) not in self.Q:\n self.Q[(state, action)] = self.Q_default_value\n\n self.Q[(state, action)] = (1 - self.alpha) * self.Q[(state, action)] + \\\n self.alpha * (reward + self.gamma * self.max_Q_by_state(state_prime))", "def test_double_dqn_loss(self):\n\n loss = double_dqn_loss(self.batch, self.net, self.target_net)\n assert isinstance(loss, Tensor)", "def learn(self):\n batch = self.agent.replay_buffer.sample(self.batch_size)\n states = torch.tensor([x.state for x in batch], dtype=torch.float32).to(self.agent.device) # shape == (batch_size, 3, 6, 7)\n actions = [x.action for x in batch]\n rewards = torch.tensor([x.reward for x in batch], dtype=torch.float32).to(self.agent.device)\n next_states = torch.tensor([x.next_state for x in batch], dtype=torch.float32).to(self.agent.device)\n dones = [x.done for x in batch]\n\n self.optimizer.zero_grad()\n\n\n q_vals = self.agent.policy_net(states)[range(len(actions)), actions] # Q vals for actions taken\n q_next_vals = self.agent.target_net(next_states).detach() # we don't care about grad wrt target net\n q_next_vals[dones] = 0.0 # terminal states have no future expected value\n q_targets = rewards + self.gamma * torch.max(q_next_vals, dim=1)[0]\n\n # all_q_vals = self.agent.policy_net(states)\n # print()\n # print('actions')\n # print(actions)\n # print()\n # print('original all q vals')\n # print(self.agent.policy_net(states)) \n # print(self.agent.policy_net(states).shape)\n # print()\n # print('QVALS:', q_vals)\n # print(q_vals.shape)\n # print('\\n\\n')\n # print('QTARGETS:', q_targets)\n # print(q_targets.shape)\n\n # breakpoint()\n\n loss = self.loss_fn(q_targets, q_vals).to(self.agent.device)\n loss.backward()\n \n # for layer in self.agent.policy_net.named_parameters():\n \n # # print(f'layer: {layer[0]}')\n # # print(f'grad:', layer[1].grad)\n\n # # print('loss', loss)\n # # print('q_vals grad:', q_vals.grad)\n # # print('states:', )\n\n self.optimizer.step()\n\n self.agent.learning_iters += 1\n if 
self.agent.learning_iters % self.target_update_freq == 0:\n self.agent.update_target_net()\n # logger.info('Updated target net')", "def qdot_callback (qdot_data):\n global q,qdot,eff,key,pub,qdotprev,qdotprevprev,qold,qdotpnone,qdotppnone,qmin,qmax\n qdot = np.array([qdot_data.velocity]).transpose() # store received vector in global variable\n eff = qdot_data.effort[0] # store new seq number\n tosend = JointState() # joint state object to be sent\n qtmp = q # store old q in qtmp\n\n # Integration\n if not qdotppnone: # can use Simpson integration\n q = qold + DT * (qdot + qdotprevprev + 4 * qdotprev) / 3\n elif not qdotpnone: # can use trap integration\n q = q + DT * (qdot + qdotprev) * .5\n #qdotppnone = False\n else: # can use rectangular integration\n q = q + DT * qdot\n qdotpnone = False\n\n for i in range(7):\n q[i] = sat(q[i],qmin[i],qmax[i])\n\n # Update past values\n qdotprev = qdot\n qdotprevprev = qdotprev\n qold = qtmp\n\n # Fill in and send object tosend\n tosend.position = q\n tosend.effort = [eff]\n tosend.header.stamp = rospy.Time.now()\n pub.publish(tosend)", "def update_q_values(self, s_current, action, r_next, s_next, action_next):\n action = actions_indexes[action]\n action_next = actions_indexes[action_next]\n self.q_values[s_current, action] += self.alpha * (\n r_next + self.gamma * self.q_values[s_next, action_next] - self.q_values[s_current, action])", "def update_target_network(self, tau):\n for p_target, p_local in zip(self.q_network_target.parameters(), self.q_network_local.parameters()):\n p_target.data.copy_(tau * p_local.data + (1.0-tau) * p_target.data)", "def getQValue(self, state, action):\n \"\"\"Description:\n [Enter a description of what you did here.]\n Use first equation in slide 71 of MDP to compute q-value depond on weights and current features.\n \n !! But I think what I did is not work for IdentityExtractor. 
Because feature of IdentityExtrator always return 1,\n it did not change even a ghost is closing.\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n qValue = self.weight * self.featExtractor.getFeatures(state,action)\n return qValue\n \"\"\" END CODE \"\"\"", "def update(self, s, a, r, t, s_prime):\n # Q-learning update:\n #Q(s,a) = Q(s,a) + \\alpha * [r + \\gamma * \\max_a Q(s',a) - Q(s,a)]\n\n #raise NotImplementedError(\"Implement Q-learning update\")\n\n # TODO Q-learning update\n # Two parts:\n target = 0\n if t == True:\n target = r\n else:\n # 1) Compute the target value reward + DISCOUNT * \\max_a Q(s', a)\n target = r + DISCOUNT_FACTOR * np.max(self.q[s_prime])\n # 2) Update Q-values with Q(s, a) += LEARNING_RATE * (target - Q(s, a))\n self.q[s,a] += LEARNING_RATE * (target - self.q[s, a])\n\n # 1) Compute target\n # Note: If s_prime (s') is a terminal state (t), then target is only \"target = reward\"\n # (You will need an if-else struct)\n \n\n # 2) Update Q-values", "def act(self, state):\n state = torch.from_numpy(state).float().view(1,-1)\n self.target_q.eval()\n action = self.target_q(state).to(device=device).max(1)[1].item()\n return action", "def learn(self):\n \n # target parameter update\n # target parameter update\n if self.learn_step_counter % self.nu_iter == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict())\n #testing the preformace of the network\n if self.learn_step_counter == 0:\n print('As referece this first test on dev data. Is maded with the Q networks, initialized randomly : ' )\n else:\n print(\"\\n Lets copy the Q-value Net in to Q-target net!. And test the performace on the dev data: \")\n \n current_bleu = self.dev_network()\n print(\"Current Bleu score is: \", current_bleu)\n \n self.learn_step_counter += 1\n\n \n long_Batch = self.sample_size*3\n # Sampling the higgest rewards values\n b_memory_big = self.memory[np.argsort(-self.memory[:-self.max_output_length, self.state_size+1])][:long_Batch]\n \n sample_index = np.random.choice(long_Batch, self.sample_size)\n b_memory = b_memory_big[sample_index, :]\n\n b_s = torch.FloatTensor(b_memory[:, :self.state_size])\n b_a = torch.LongTensor(b_memory[:, self.state_size:self.state_size+1].astype(int))\n b_r = torch.FloatTensor(b_memory[:, self.state_size+1:self.state_size+2])\n b_s_ = torch.FloatTensor(b_memory[:, self.state_size+2: self.state_size+2 + self.state_size])\n\n b_is_eos = torch.FloatTensor(b_memory[:, self.size_memory1-1:]).view(self.sample_size, 1)\n #print(b_a, b_a.size)\n #print(b_is_eos)\n #Activate the eval_net\n unfreeze_model(self.eval_net)\n \n # q_eval w.r.t the action in experience\n q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)\n q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate\n #taking the most likely action.\n b_a_ = torch.LongTensor(q_next.max(1)[1].view(self.sample_size, 1).long())\n #b_a_ = q_next.max(1)[0].view(self.sample_size, 1).long() # shape (batch, 1)\n q_eval_next = self.eval_net(b_s_).gather(1, b_a_) # shape (batch, 1)\n \n #If eos q_target = reward. 
\n q_target = b_r + self.gamma * b_is_eos* q_eval_next.view(self.sample_size, 1) # shape (batch, 1)\n #version 0\n #q_target = b_r + self.gamma * q_next.max(1)[0].view(self.sample_size, 1) # shape (batch, 1)\n \n loss = self.loss_func(q_eval, q_target)\n \n self.tb_writer.add_scalar(\"learn/learn_batch_loss\",\n loss.data, self.learn_step_counter)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n #desctivate the eval_net\n freeze_model(self.eval_net)", "def tweak_q(self, q):\n self._q = q\n self.reset()", "def update1(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n ############################################################################################################ Eric Changed nextState to other stuff\n \n actionList = nextState.getLegalActions(self.index)\n\n if (not (nextState == None)) and len(actionList) > 0 :\n expectedRewardList = []\n #print \"state \",nextState,\" has legal actions \", state.getLegalActions(nextState)\n for a in actionList:\n #print \"next state: \",nextState,\" action: \",a, \"Value: \", self.Q[(nextState, a)]\n expectedRewardList.append(self.Q[(nextState, a)])\n #print \"expected reward list: \", expectedRewardList\n self.Q[(state, action)] = self.Q[(state, action)] + self.alpha * (reward + self.discount * max(expectedRewardList) - self.Q[(state, action)])\n #print self.Q\n return\n else:\n self.Q[(state, action)] = self.Q[(state, action)] + self.alpha * (reward - self.Q[(state, action)])\n return\n\n #print \"I should never be here\"\n #util.raiseNotDefined()", "def perform_q_learning(self, prev_state: str, state: str, action: Action, reward):\n max_future_q = max(self.Qs[state].values())\n self.Qs[prev_state][action] = (1.0 - self.alpha) * self.Qs[prev_state][\n action] + self.alpha * (reward + self.gamma * max_future_q)\n self.Vs[prev_state] = max(self.Qs[prev_state].values())", "def update_Qtable(self, r, action, next_state):\n if self.learning:\n # 8. 
When learning, update the q table according\n # to the given rules\n\n # The reward the turkey get after doing the action\n reward_t1 = r\n\n # Code Review1: Replace the following code\n '''\n # Get the key & value which enables the max Q value in State(t+1)\n max_Q_St1_info = max(self.Qtable[next_state].items(), key=operator.itemgetter(1))\n\n # The action which enables the max Q value in State(t+1)\n max_Q_St1_action = max_Q_St1_info[0]\n\n # The max Q value in State(t+1)\n max_Q_St1 = max_Q_St1_info[1]\n '''\n max_Q_St1 = max(self.Qtable[next_state].values())\n\n Q_old = self.Qtable[self.state][action]\n Q_new = reward_t1 + self.gamma * max_Q_St1\n\n self.Qtable[self.state][action] = (\n 1 - self.alpha) * Q_old + self.alpha * Q_new", "def update_q_net(\n q_net: VisualQNetwork, \n optimizer: torch.optim, \n buffer: Buffer, \n action_size: int\n ):\n BATCH_SIZE = 1000\n NUM_EPOCH = 3\n GAMMA = 0.9\n batch_size = min(len(buffer), BATCH_SIZE)\n random.shuffle(buffer)\n # Split the buffer into batches\n batches = [\n buffer[batch_size * start : batch_size * (start + 1)]\n for start in range(int(len(buffer) / batch_size))\n ]\n for _ in range(NUM_EPOCH):\n for batch in batches:\n # Create the Tensors that will be fed in the network\n obs = torch.from_numpy(np.stack([ex.obs for ex in batch]))\n reward = torch.from_numpy(\n np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1)\n )\n done = torch.from_numpy(\n np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1)\n )\n action = torch.from_numpy(np.stack([ex.action for ex in batch]))\n next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch]))\n\n # Use the Bellman equation to update the Q-Network\n target = (\n reward\n + (1.0 - done)\n * GAMMA\n * torch.max(q_net(next_obs).detach(), dim=1, keepdim=True).values\n )\n mask = torch.zeros((len(batch), action_size))\n mask.scatter_(1, action, 1)\n prediction = torch.sum(qnet(obs) * mask, dim=1, keepdim=True)\n criterion = torch.nn.MSELoss()\n loss = criterion(prediction, target)\n\n # Perform the backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()", "def Q(self):\n self.dualEigenmatrix()", "def _Q(state, action):\n action = tuple(action)\n print(\"CALLING Q({}, {})\".format(state, action))\n # minimize this not maximize\n t, X, Y = state\n if t == 1:\n assert action is None, 'No choice for action in last state!'\n result = terminal_reward(state)\n else:\n gamma = 1\n x, y = action_to_xy(action, X, Y)\n reward = F(x - y)\n state_ = take_step(state, action)\n if t == 2:\n # in this case just have reward due to action and terminal state reward\n result = reward + gamma * terminal_reward(state_)\n else:\n def fun(action):\n return Q(state_, action)\n alpha_x0 = 0.5\n alpha_y0 = 0.5\n # watch out, Q is not differentiable (piecewise linear) so maybe trouble\n res = so.minimize(fun, (alpha_x0, alpha_y0), **min_args)\n bok('Q')\n result = reward + gamma * res.fun\n return result", "def Q(self, states, neural_net_to_use, no_grad = False):\r\n\r\n states = torch.from_numpy(states)\r\n states = states.float()\r\n\r\n if no_grad:\r\n with torch.no_grad():\r\n output = neural_net_to_use(states)\r\n return output\r\n\r\n output = neural_net_to_use(states)\r\n return output", "def update_q_value(q_table: np.ndarray, state: int, action: int,\n learning_rate: float, current_reward: float,\n new_state: int, discount_rate: float) -> None:\n q_table[state, action] = new_q_value(q_table, state, action, learning_rate,\n current_reward, new_state,\n 
discount_rate)", "def update_target_network(self):\n self.target_dqn.set_weights.remote(self.dqn.get_weights.remote())", "def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n # print \"update\"\n oldValue = self.getQValue(state, action)\n sample = reward + self.discount*self.computeValueFromQValues(nextState)\n self.qValues[(state, action)] = (1-self.alpha)*oldValue + self.alpha*(sample)", "def train(network_def, target_params, optimizer, states, actions, next_states, rewards,\n terminals, loss_weights, cumulative_gamma, target_opt, mse_inf,tau,alpha,clip_value_min, rng):\n online_params = optimizer.target\n def loss_fn(params, rng_input, target, loss_multipliers):\n def q_online(state):\n return network_def.apply(params, state, rng=rng_input)\n\n q_values = jax.vmap(q_online)(states).q_values\n q_values = jnp.squeeze(q_values)\n replay_chosen_q = jax.vmap(lambda x, y: x[y])(q_values, actions)\n \n if mse_inf:\n loss = jax.vmap(mse_loss)(target, replay_chosen_q)\n else:\n loss = jax.vmap(dqn_agent.huber_loss)(target, replay_chosen_q)\n\n mean_loss = jnp.mean(loss_multipliers * loss)\n return mean_loss, loss\n\n rng, rng2, rng3, rng4 = jax.random.split(rng, 4)\n\n def q_target(state):\n return network_def.apply(target_params, state, rng=rng2)\n\n def q_target_online(state):\n return network_def.apply(online_params, state, rng=rng4)\n\n if target_opt == 0:\n target = dqn_agent.target_q(q_target, next_states, rewards, terminals, cumulative_gamma) \n elif target_opt == 1:\n #Double DQN\n target = target_DDQN(q_target_online, q_target, next_states, rewards, terminals, cumulative_gamma)\n\n elif target_opt == 2:\n #Munchausen\n target = target_m_dqn(q_target_online, q_target, states,next_states,actions,rewards,terminals,\n cumulative_gamma,tau,alpha,clip_value_min)\n else:\n print('error')\n\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n (mean_loss, loss), grad = grad_fn(online_params, rng3, target, loss_weights)\n optimizer = optimizer.apply_gradient(grad)\n return optimizer, loss, mean_loss", "def target_m_dqn(model, target_network, states, next_states, actions,rewards, terminals, \n cumulative_gamma,tau,alpha,clip_value_min):\n \n #----------------------------------------\n 
q_state_values = jax.vmap(target_network, in_axes=(0))(states).q_values\n q_state_values = jnp.squeeze(q_state_values)\n \n next_q_values = jax.vmap(target_network, in_axes=(0))(next_states).q_values\n next_q_values = jnp.squeeze(next_q_values)\n #----------------------------------------\n\n tau_log_pi_next = stable_scaled_log_softmax(next_q_values, tau, axis=1)\n pi_target = stable_softmax(next_q_values,tau, axis=1)\n replay_log_policy = stable_scaled_log_softmax(q_state_values, tau, axis=1)\n\n #----------------------------------------\n \n replay_next_qt_softmax = jnp.sum((next_q_values-tau_log_pi_next)*pi_target,axis=1)\n\n replay_action_one_hot = nn.one_hot(actions, q_state_values.shape[-1])\n tau_log_pi_a = jnp.sum(replay_log_policy * replay_action_one_hot, axis=1)\n\n #a_max=1\n tau_log_pi_a = jnp.clip(tau_log_pi_a, a_min=clip_value_min,a_max=1)\n\n munchausen_term = alpha * tau_log_pi_a\n modified_bellman = (rewards + munchausen_term +cumulative_gamma * replay_next_qt_softmax *\n (1. - jnp.float32(terminals)))\n \n return jax.lax.stop_gradient(modified_bellman)", "def setQ(self,Q):\n self.Q = Q", "def qUpdate(self,state,action,reward,next_state):\r\n #add to experience\r\n if next_state != \"end\":\r\n self.experience.append([self.feat_funct(state),action,\r\n reward,self.feat_funct(next_state)])\r\n else:\r\n self.experience.append([self.feat_funct(state),action,\r\n reward,next_state]) \r\n #print(state,action,reward,next_state)\r\n #get minibatch\r\n sample = np.random.randint(0,len(self.experience),self.batch_size)\r\n d = np.zeros((self.batch_size,self.size))\r\n y = np.zeros((self.batch_size,1))\r\n for i,row in enumerate(sample):\r\n state,action,reward,next_state = self.experience[row]\r\n #get feature vector\r\n d[i,:] = self.comb_feat_action(state,action)\r\n #get target\r\n #check if end of episoe\r\n if next_state == 'end':\r\n y[i] = reward\r\n else:\r\n y[i] = reward + self.gamma * self.maxOldQ(next_state)\r\n #print(row,next_state)\r\n #print(self.maxOldQ(next_state))\r\n \r\n loss = self.train(d,y)\r\n \r\n #update old learner if greater than num_update\r\n if self.iteration % self.num_update == 0:\r\n self.old_learn = copy.deepcopy(self.learner)\r\n self.iteration += 1\r\n \r\n return(loss)", "def buildQ(self):\r\n\r\n print 'Building Q ...'\r\n\r\n self.y = T.matrix('y')\r\n\r\n mlp = MLP(activations=self.hyper['q_activs'],\r\n dims=self.hyper['q_dims'],\r\n weights_init=self.hyper['q_W_init'],\r\n biases_init=Constant(0))\r\n\r\n q_parameters = mlp.apply(self.y)\r\n mlp.initialize()\r\n\r\n # self.qxgy_mu.shape == (minibatch size, num of dimension of x)\r\n self.qxgy_mu = q_parameters[:,:self.hyper['x_dim']]\r\n\r\n # self.qxgy_var.shape == (minibatch size, num of dimension of x)\r\n self.qxgy_var = T.exp( q_parameters[:,self.hyper['x_dim']:2*self.hyper['x_dim']] )\r\n\r\n # self.qwgy_mu.shape == (minibatch size, num of dimension of w)\r\n self.qwgy_mu = q_parameters[:,2*self.hyper['x_dim']:2*self.hyper['x_dim']+self.hyper['w_dim']]\r\n\r\n # self.qwgy_var.shape == (minibatch size, num of dimension of w)\r\n self.qwgy_var = T.exp( q_parameters[:,2*self.hyper['x_dim']+self.hyper['w_dim']:] )\r\n\r\n\r\n #---Will be useful to compute samples from q(x|y)---#\r\n #self.eps_x.shape == (minibatch size, # of x samples , # of dimension of x)\r\n self.eps_x = self.srng.normal((self.qxgy_mu.shape[0] ,self.hyper['L_x'] ,self.hyper['x_dim']))\r\n\r\n #self.x corresponds roughly to the function g(\\epsilon,y) (see reparametrization trick in Kingma 2014)\r\n #self.x.shape == 
(minibatch size, # of x samples , # of dimension of x)\r\n self.x = self.qxgy_mu.dimshuffle(0,'x',1) + T.sqrt(self.qxgy_var).dimshuffle(0,'x',1)*self.eps_x\r\n\r\n #---Will be useful to compute samples from q(w|y)---#\r\n #self.eps_w.shape == (minibatch size, # of w samples , # of dimension of w)\r\n self.eps_w = self.srng.normal((self.qwgy_mu.shape[0] ,self.hyper['L_w'] ,self.hyper['w_dim']))\r\n\r\n #self.w corresponds roughly to the function g(\\epsilon,y) (see reparametrization trick in Kingma 2014)\r\n #self.w.shape == (minibatch size, # of w samples , # of dimension of w)\r\n self.w = self.qwgy_mu.dimshuffle(0,'x',1) + T.sqrt(self.qwgy_var).dimshuffle(0,'x',1)*self.eps_w\r\n\r\n\r\n #---Building the log density q(x|y)---#\r\n little_num = 10**(-32)\r\n inside_exp = -T.sum((self.x - self.qxgy_mu.dimshuffle(0,'x',1))**2/(2*self.qxgy_var.dimshuffle(0,'x',1)), axis=2)\r\n norm_cst = (2*np.pi)**(-self.hyper['x_dim']/2.)*T.exp(T.sum(T.log(self.qxgy_var), axis=1))**(-1/2.)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n qxgy = norm_cst.dimshuffle(0,'x')*T.exp(inside_exp)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n self.log_qxgy = T.log(qxgy + little_num)", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n # Double DQN. Uses local network for action selection and target network for value estimation\n # see: https://arxiv.org/pdf/1509.06461.pdf\n Q_actions_next = self.dqn_local(next_states).detach().argmax(1).unsqueeze(1)\n Q_targets_next = self.dqn_target(next_states).gather(1, Q_actions_next)\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n # Standard DQN\n # Get max predicted Q values (for next states) from target model\n # Q_targets_next = self.dqn_target(next_states).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states\n # Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.dqn_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.dqn_local, self.dqn_target, TAU)", "def target_DDQN(model, target_network, next_states, rewards, terminals, cumulative_gamma):\n next_q_values = jax.vmap(model, in_axes=(0))(next_states).q_values\n next_q_values = jnp.squeeze(next_q_values)\n replay_next_qt_max = jnp.argmax(next_q_values, axis=1)\n next_q_state_values = jax.vmap(target_network, in_axes=(0))(next_states).q_values\n\n q_values = jnp.squeeze(next_q_state_values)\n replay_chosen_q = jax.vmap(lambda t, u: t[u])(q_values, replay_next_qt_max)\n \n return jax.lax.stop_gradient(rewards + cumulative_gamma * replay_chosen_q *\n (1. 
- terminals))", "def update(self, sess, batch, *args, **kwargs):\n # Calculated target Q values using target estimator\n assert \"state\" in batch and \"action\" in batch and \\\n \"reward\" in batch and \"next_state\" in batch and \\\n \"episode_done\" in batch\n target_q_val = self._target_estimator.estimate(\n batch[\"state\"], batch[\"action\"], batch[\"reward\"],\n batch[\"next_state\"], batch[\"episode_done\"])\n\n # Prepare data and fit Q network\n feed_dict = {self._input_target_q: target_q_val,\n self._input_action: batch[\"action\"]}\n if \"_weight\" in batch:\n feed_dict[self._input_sample_weight] = batch[\"_weight\"]\n feed_dict.update(self._q.input_dict(batch[\"state\"]))\n fetch_dict = {\n \"action\": batch[\"action\"], \"reward\": batch[\"reward\"],\n \"done\": batch[\"episode_done\"],\n \"q\": self.selected_q, \"target_q\": target_q_val,\n \"optimizer_loss\": self._sym_loss,\n \"td\": self._op_td,\n \"td_losses\": self._op_losses,\n \"td_losses_weighted\": self._op_losses_weighted}\n update_run = network.UpdateRun(feed_dict=feed_dict, fetch_dict=fetch_dict)\n\n return update_run", "def act(self, state, eps=0.):\n state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state)\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy())\n else:\n return random.choice(np.arange(self.action_size))", "def act(self, state, eps=0.):\n state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state)\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy())\n else:\n return random.choice(np.arange(self.action_size))", "def algorithm(self):\n convergence_threshold = 50\n reward_num_threshold = 300\n alpha = 1\n gamma = 0.5\n while (self.reward_num < reward_num_threshold) and (self.count<convergence_threshold):\n print('------')\n print('Iteration', self.reward_num, '/', reward_num_threshold)\n print('Iterations w/out Q-update:', self.count, '/', convergence_threshold)\n # select a possible action (any of them; all are valid)\n s = self.get_state_num()\n print(\"Initial state:\", s)\n a = random.choice(np.arange(3))\n self.apply_action(a)\n while self.reward == None:\n #print(\"Sleeping to wait for reward\")\n rospy.sleep(0.5)\n reward = self.reward\n print(\"REWARD =\", reward)\n self.reward = None\n if reward == 0:\n next_state = self.get_state_num()\n mx = np.amax(self.Q[next_state])\n else:\n ## There is no next state if nonzero reward seen\n mx = 0\n update = self.Q[s][a] + alpha*(reward+gamma*mx-self.Q[s][a])\n if self.Q[s][a] != update:\n print(\"Update Q matrix\")\n self.Q[s][a] = update\n self.count = 0\n else:\n self.count += 1\n\n print(\"Finished calculating Q-Matrix\\n\\n\\n\\n\\n\\n\\n\")", "def Tlosses(self, dq = np.zeros(1) , ddq = np.zeros(1)): \n \n T = np.dot( self.Ia , ddq ) + np.dot( self.Da , dq )\n \n return T", "def step(self, state, action, reward, next_state, done):\n\n if done:\n self.Q[state][action] = self.update_Qsa(self.Q[state][action],0,reward,self.alpha, self.gamma)\n else:\n best_next_a = np.argmax(self.Q[next_state])\n self.Q[state][action] = self.update_Qsa(self.Q[state][action],self.Q[next_state][best_next_a],reward,self.alpha, self.gamma)", "def calculateTarget(self, 
qValuesNewState, reward, isFinal):\n if isFinal:\n return reward\n else :\n return reward + self.discountFactor * self.getMaxQ(qValuesNewState)", "def _compute_dqn_loss(self, samples: Dict[str, np.ndarray], gamma: float) -> torch.Tensor:\n device = self.device # for shortening the following lines\n state = torch.FloatTensor(samples[\"obs\"]).permute(0,3,1,2).to(device)\n next_state = torch.FloatTensor(samples[\"next_obs\"]).permute(0,3,1,2).to(device)\n action = torch.LongTensor(samples[\"acts\"]).to(device)\n reward = torch.FloatTensor(samples[\"rews\"].reshape(-1, 1)).to(device)\n done = torch.FloatTensor(samples[\"done\"].reshape(-1, 1)).to(device)\n \n # Categorical DQN algorithm\n delta_z = float(self.v_max - self.v_min) / (self.atom_size - 1)\n\n with torch.no_grad():\n # Double DQN\n next_action = self.dqn(next_state).argmax(1)\n next_dist = self.dqn_target.dist(next_state)\n next_dist = next_dist[range(self.batch_size), next_action]\n\n t_z = reward + (1 - done) * gamma * self.support\n t_z = t_z.clamp(min=self.v_min, max=self.v_max)\n b = (t_z - self.v_min) / delta_z\n l = b.floor().long()\n u = b.ceil().long()\n\n offset = (\n torch.linspace(\n 0, (self.batch_size - 1) * self.atom_size, self.batch_size\n ).long()\n .unsqueeze(1)\n .expand(self.batch_size, self.atom_size)\n .to(self.device)\n )\n\n proj_dist = torch.zeros(next_dist.size(), device=self.device)\n proj_dist.view(-1).index_add_(\n 0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1)\n )\n proj_dist.view(-1).index_add_(\n 0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1)\n )\n\n dist = self.dqn.dist(state)\n log_p = torch.log(dist[range(self.batch_size), action])\n elementwise_loss = -(proj_dist * log_p).sum(1)\n\n return elementwise_loss", "def make_q_update(self, reward, state: str, joint_action: Dict[int, str], next_state, alpha: float, gamma: float):\n previous_value = self.Q_t[state][(joint_action[0], joint_action[1])]\n if '(0, 0)' in next_state:\n max_future_reward = 0\n else:\n max_future_reward = max(self.Q_t[next_state].values())\n new_value = reward + gamma * max_future_reward\n\n self.Q_t[state][(joint_action[0], joint_action[1])] = (1 - alpha) * previous_value + alpha * new_value", "def _build_target_q_op(self):\n targets = []\n for gamma, target_q in zip(self.gammas,\n self._replay_next_target_net_outputs.q_values):\n # Get the maximum Q-value across the actions dimension.\n replay_next_qt_max = tf.reduce_max(target_q, 1)\n\n # Calculate the Bellman target value.\n # Q_t = R_t + \\gamma^N * Q'_t+1\n # where,\n # Q'_t+1 = \\argmax_a Q(S_t+1, a)\n # (or) 0 if S_t is a terminal state,\n # and\n # N is the update horizon (by default, N=1).\n cumulative_gamma = math.pow(gamma, self.update_horizon)\n n_step_reward = self._build_discounted_n_step_rewards(gamma)\n targets.append(n_step_reward + cumulative_gamma * replay_next_qt_max *\n (1. 
- tf.cast(self._replay.terminals, tf.float32)))\n return targets", "def _compute_dq(self, finger_id, xdes, q0):\n Ji = self.compute_jacobian(finger_id, q0)[:3, :]\n frame_id = self.tip_link_ids[finger_id]\n xcurrent = self.data.oMf[frame_id].translation\n Jinv = np.linalg.pinv(Ji)\n return Jinv.dot(xdes - xcurrent)", "def q_update(self):\n\n # exit if the experience buffer is not yet large enough\n if self.experience_buffer.size < self.batch_size:\n return\n \n # get the random batch\n states, action_indices, rewards, not_terminals, succ_states, succ_players, succ_legal_moves = self.experience_buffer.random_batch(self.batch_size)\n states = states.to(Globals.device)\n action_indices = action_indices.to(Globals.device)\n rewards = rewards.to(Globals.device)\n not_terminals = not_terminals.to(Globals.device)\n succ_states = succ_states.to(Globals.device)\n succ_players = succ_players.to(Globals.device)\n\n # prepare the training data\n q_values = self.target_network(succ_states)\n target = torch.empty(1, self.batch_size)\n for i in range(self.batch_size):\n if not_terminals[i] == 0:\n target[0, i] = rewards[i]\n continue\n\n if succ_players[i] == CONST.WHITE_MOVE:\n legal_q_values = q_values[0, 0:9][succ_legal_moves[i]]\n q_value, _ = legal_q_values.max(0)\n else:\n legal_q_values = q_values[0, 9:18][succ_legal_moves[i]]\n q_value, _ = legal_q_values.min(0)\n\n target[0, i] = rewards[i] + self.disc*not_terminals[i]*q_value\n\n # execute the training step of the network\n self.training_network.train_step(states, target, action_indices) # the eligibility trace is used as td target", "def train_step(self):\n # Sample training batch from replay\n training_batch = self.replay.sample(self.batch_size)\n\n # Calculate target Q values for each example:\n # For non-terminal states, targetQ is estimated according to\n # targetQ = r + gamma*Q'(s',max_a Q(s',a))\n # where Q' denotes the target network.\n # For terminating states the target is computed as\n # targetQ = r\n updates = []\n for exp in training_batch:\n start,_,reward,end = exp\n if(self.dampen_states):\n # To dampen states (usually done after major patches or when the meta shifts)\n # we replace winning rewards with 0.\n reward = 0.\n state_code = end.evaluate()\n if(state_code==DraftState.DRAFT_COMPLETE or state_code in DraftState.invalid_states):\n # Action moves to terminal state\n updates.append(reward)\n else:\n # Follwing double DQN paper (https://arxiv.org/abs/1509.06461).\n # Action is chosen by online network, but the target network is used to evaluate this policy.\n # Each row in predicted_Q gives estimated Q(s',a) values for all possible actions for the input state s'.\n feed_dict = {self.ddq_net.online_ops[\"input\"]:[end.format_state()],\n self.ddq_net.online_ops[\"valid_actions\"]:[end.get_valid_actions()]}\n predicted_action = self.ddq_net.sess.run(self.ddq_net.online_ops[\"prediction\"], feed_dict=feed_dict)[0]\n\n feed_dict = {self.ddq_net.target_ops[\"input\"]:[end.format_state()]}\n predicted_Q = self.ddq_net.sess.run(self.ddq_net.target_ops[\"outQ\"], feed_dict=feed_dict)\n\n updates.append(reward + self.ddq_net.discount_factor*predicted_Q[0,predicted_action])\n\n # Update online net using target Q\n # Experience replay stores action = (champion_id, position) pairs\n # these need to be converted into the corresponding index of the input vector to the Qnet\n actions = np.array([start.get_action(*exp[1]) for exp in training_batch])\n targetQ = np.array(updates)\n feed_dict = 
{self.ddq_net.online_ops[\"input\"]:np.stack([exp[0].format_state() for exp in training_batch],axis=0),\n self.ddq_net.online_ops[\"actions\"]:actions,\n self.ddq_net.online_ops[\"target\"]:targetQ,\n self.ddq_net.online_ops[\"dropout_keep_prob\"]:0.5}\n _ = self.ddq_net.sess.run(self.ddq_net.online_ops[\"update\"],feed_dict=feed_dict)", "def getDQN(self, shortMemory):\n # sampleSize = self.memorySize // 5 # use only with traces (= short memory larger than 5 entries)\n sampleSize = 1\n\n if len(shortMemory) < sampleSize:\n sample = shortMemory\n else:\n sample = random.sample(shortMemory, sampleSize)\n\n # states\n netInput = []\n for memory in sample:\n netInput.append(\n torch.cat((memory.action.state.strengths, memory.action.state.focus, memory.action.changes)))\n\n netInput = torch.stack(netInput)\n\n # updates for Q-values\n labels = []\n for memory in sample:\n if memory.nextState:\n labels.append(memory.reward)\n else:\n currentQ = self.q.evaluate(memory.action)\n labels.append(currentQ + self.learningRate * (\n self.discount * self.q.evaluateMax(memory.nextState, self.actionSet) - currentQ))\n\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n\n return netInput.float(), labels.float() # casting added due to occasional occurrence of LongTensors <- why?", "def train(self):\n if len(self.buffer) >= self.batch_size:\n with torch.no_grad():\n states, actions, rewards, next_states, dones = self.buffer.sample(self.batch_size)\n\n # Send data to GPU\n states = torch.stack(states).to(self.device, dtype=torch.float)\n actions = torch.stack(actions).to(self.device, dtype=torch.float)\n rewards = torch.stack(rewards).to(self.device, dtype=torch.float)\n rewards = torch.reshape(rewards, (self.batch_size, 1))\n\n next_states = torch.stack(next_states).to(self.device, dtype=torch.float)\n dones = torch.stack(dones).to(self.device, dtype=torch.float)\n\n #TODO\n\n # Calculate target Q values using the Target Network\n selection = torch.argmax(self.main_dqn(next_states), dim = 1).unsqueeze(1)\n\n evaluation = self.target_dqn(next_states)\n evaluation = evaluation.gather(1, selection.long()) #size [256,1]\n\n #Create Done mask\n nonzero_indices = torch.nonzero(dones).reshape(-1).tolist()\n dones_mask = torch.eye(self.batch_size)\n for index in nonzero_indices:\n dones_mask[index,index] = 0\n dones_mask = dones_mask.to(self.device, dtype=torch.float)\n\n # Calculte target\n target = rewards + torch.matmul(dones_mask, evaluation*self.gamma)\n target = target.detach()\n\n # Calculate Q values using the Main Network\n if self.env.freely_moving:\n n_classes = self.env.number_of_action_channels * self.env.number_of_rows * self.env.number_of_columns\n else:\n n_classes = self.env.number_of_action_channels * 1 * self.env.nA\n\n n_samples = self.batch_size\n labels = torch.flatten(actions.type(torch.LongTensor), start_dim=0)\n labels_tensor = torch.as_tensor(labels)\n action_masks = torch.nn.functional.one_hot(labels_tensor, num_classes=n_classes).to(self.device, dtype=torch.float)\n\n q_value = action_masks * self.main_dqn(states)\n q_value = torch.sum(q_value, dim=-1).reshape((self.batch_size, 1))\n\n # Calculate loss\n loss = self.mse(target, q_value)\n\n # Optimize the model\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.main_dqn.parameters(), 5)\n self.optimizer.step()\n\n # Soft Copy the Main Network's weights to the Target Network\n self.soft_update_of_target_network(self.main_dqn, self.target_dqn,tau=1e-3)\n\n return loss\n return 0", "def 
update_q(self,action,reward):\n #print('')\n #print('Action index is: ' + str(action))\n #print('Provided reward is: ' + str(reward))\n \n # Read from disk before updating\n try:\n pickle_in = open(\"static/data/values.pickle\",\"rb\")\n values = pickle.load(pickle_in)\n #print(values)\n self.values = values\n pickle_in = open(\"static/data/counts.pickle\",\"rb\")\n self.counts = pickle.load(pickle_in)\n pickle_in = open(\"static/data/actions_taken.pickle\",\"rb\")\n actions_taken = pickle.load(pickle_in)\n pickle_in = open(\"static/data/reward_list.pickle\",\"rb\")\n reward_list = pickle.load(pickle_in)\n except:\n actions_taken = []\n reward_list = []\n pass\n \n self.counts[action] += 1\n n = self.counts[action]\n value = self.values[action]\n actions_taken.append(action)\n reward_list.append(reward)\n \n # Running product\n new_value = value + (1/n) * (reward - value)\n self.values[action] = new_value\n \n \n # Save to disk before exiting\n pickle_out = open('static/data/values.pickle','wb')\n pickle.dump(self.values, pickle_out)\n pickle_out.close()\n pickle_out = open('static/data/counts.pickle','wb')\n pickle.dump(self.counts, pickle_out)\n pickle_out.close()\n pickle_out = open('static/data/actions_taken.pickle','wb')\n pickle.dump(actions_taken, pickle_out)\n pickle_out.close()\n pickle_out = open('static/data/reward_list.pickle','wb')\n pickle.dump(reward_list, pickle_out)\n pickle_out.close()", "def update_q_table(self, state, action, reward):\n\n q_key = self.q_key(state, action)\n inputs = self.env.sense(self)\n self.next_waypoint = self.planner.next_waypoint()\n new_state = self.set_current_state(inputs)\n\n #update q_value in q_table according to learning formula\n x = self.q_value(state, action)\n V = reward + (self.gamma * self.max_q_value(new_state))\n new_q_value = x + (self.alpha * (V - x))\n self.q_table[q_key] = new_q_value", "def ipdTft(length,gamma,epsilon,alpha = .8):\r\n #possible previous states (what each did in the last iteration)\r\n states = [(\"*\",\"*\"),(\"C\",\"D\"), (\"C\",\"C\"), (\"D\",\"C\"), (\"D\",\"D\")]\r\n #actions: Defect or Cooperate\r\n actions = [\"D\",\"C\"]\r\n #payoff matrix (as dict)\r\n payoff = {(\"C\",\"D\"): (-3,0), (\"C\",\"C\"): (-1,-1), \r\n (\"D\",\"C\"): (0,-3), (\"D\",\"D\"): (-2,-2)}\r\n #initialize learners \r\n\r\n #q1 = qLearn(states,actions,gamma,alpha,epsilon)\r\n #q1 = QLearnCont(ipd_feats,10,actions,gamma,alpha,epsilon,kernel = 'linear')\r\n #q1 = DQN(ipd_feats,10,actions,.99,.5,.1,learn_type = 'linear')\r\n q1 = DQN(ipd_feats,10,actions,.99,.5,.1,shape = (10,10,1))\r\n #initialize list of rewards\r\n rewards = []\r\n #iterate through length states and run the game\r\n prevState = (\"*\",\"*\")\r\n for i in range(length):\r\n #get actions\r\n print(\"Iteration %i:\" %i)\r\n print(\"Previous State:\", prevState)\r\n qa1 = q1.chooseAction(prevState)\r\n qa2 = tft(prevState[0])\r\n print(\"Player 1 Action:\",qa1)\r\n print(\"Player 2 Action:\",qa2)\r\n \r\n #find payoff\r\n newState = (qa1,qa2)\r\n reward = payoff[newState]\r\n rewards.append(reward[0])\r\n print(\"Player 1 Reward:\", reward[0])\r\n print(\"Player 2 Rewards:\", reward[1])\r\n print(\"Current average reward for Player 1:\",np.mean(rewards))\r\n #assign reward and update Q params\r\n q1.qUpdate(prevState,qa1,reward[0],newState)\r\n \r\n prevState = newState\r\n #print(q1.Q)\r\n return(rewards,q1)", "def expand_q(self) -> Q:\n \"\"\"Expand each term.\"\"\"\n\n self.t = sp.expand(self.t)\n self.x = sp.expand(self.x)\n self.y = sp.expand(self.y)\n self.z = 
sp.expand(self.z)\n return self", "def __init__(self, learn_q, target_estimator, td_loss_fcn=None):\n super(FitTargetQ, self).__init__()\n # unpack params\n self._q, self._target_estimator = learn_q, target_estimator\n if td_loss_fcn is None:\n td_loss_fcn = tf.square\n # need computed target Q values and selected action as input\n self._input_target_q = tf.placeholder(\n dtype=tf.float32, shape=[None], name=\"input_target_q\")\n self._input_action = tf.placeholder(\n dtype=tf.uint8, shape=[None], name=\"input_action\")\n self._input_sample_weight = tf.placeholder_with_default([1.0], shape=[None], name=\"input_weight\")\n op_q = learn_q.output().op\n num_actions = learn_q.output().op.shape.as_list()[-1]\n self.selected_q = tf.reduce_sum(\n tf.one_hot(self._input_action, num_actions) * op_q, axis=1)\n self._op_td = self.selected_q - self._input_target_q\n self._op_losses = td_loss_fcn(self._op_td)\n self._op_losses_weighted = self._op_losses * self._input_sample_weight\n self._sym_loss = tf.reduce_mean(self._op_losses_weighted)\n self._update_operation = network.MinimizeLoss(self._sym_loss, var_list=self._q.variables)", "def act(self, q_values, *args, **kwargs):\n if np.random.binomial(1, p=self.epsilon_updater.cur_value):\n action = np.array([np.random.choice(range(len(q_values)))])\n else:\n action = np.array([np.argmax(q_values)])\n self.epsilon_updater.update()\n return action", "def _create_target_network_update_op(self, q_network, target_q_network):\n variables = q_network.get_variables()\n target_variables = target_q_network.get_variables()\n # problem\n return tf.group([\n tf.assign(target_v, target_v + self.tau * (v - target_v)) # same as original arm\n for (target_v, v) in zip(target_variables, variables)\n ])", "def __init__(self, state_size, num_actions):\n super(DQN, self).__init__()\n self.num_actions = num_actions\n self.batch_size = 128\n self.epsilon = 0.7\n self.min_epsilon = 0.05\n self.epsilon_update = 0.995\n \n\n # TODO: Define network parameters and optimizer\n \n self.buffer = ReplayMemory(10000)\n\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(0.01, 9000, 0.1)\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)\n \n hidden_sz1 = 256 \n hidden_sz2 = 128\n \n self.Q_1 = tf.keras.layers.Dense(hidden_sz1)\n self.Q_2 = tf.keras.layers.Dense(hidden_sz2)\n self.Q_3 = tf.keras.layers.Dense(self.num_actions)", "def calcQ(self,thisObs,next_action,reward):\n \n thisObs_tup=(thisObs['volume'],thisObs['time'])\n lastAction_tup=(self.lastAction['vol'],self.lastAction['price'])\n lastObs_tup=(self.lastObs['volume'],self.lastObs['time'])\n lastQvalue=0\n maxQvalue=0\n temp_action=()\n \n if (len(self.Qvalue)>0): \n \"\"\"Searches the Q-value dictionary\"\"\"\n for key,value in self.Qvalue.iteritems():\n \n if (key[0][0]== thisObs_tup[0] and key[0][1]==thisObs_tup[1]):\n if (value > maxQvalue):\n maxQvalue=value\n temp_action = key[1]\n \n if (key[0][0]== lastObs_tup[0] and key[0][1]==lastObs_tup[1] and \n key[1][0]== lastAction_tup[0] and key[1][1]==lastAction_tup[1]):\n \n lastQvalue=self.Qvalue[key]\n #print(\"This state was already encoutered and updated\")\n \n self.Qvalue[(lastObs_tup,lastAction_tup)]=lastQvalue+alpha*(reward+(gamma*maxQvalue)-lastQvalue) \n #print 'The Qtable is',self.Qvalue\n if (len(temp_action)!=0):\n #print \"I found a greedy action\" \n next_action['vol'] = temp_action[0]\n next_action['price']=temp_action[1]\n else: \n next_action=self.return_random_action(thisObs)\n \n return next_action", "def learn(self, state, 
action, reward, next_state):\r\n\r\n \"\"\"Please Fill Your Code Here.\r\n \"\"\"\r\n self.Q[state][action] = self.Q[state][action] + self.alpha * (reward + self.gamma * max(self.Q[next_state]) - self.Q[state][action])\r\n\r\n return 0", "def test_reusing_quantum_tape(self, execute_kwargs, tol):\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n\n dev = qml.device(\"default.qubit\", wires=2)\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a, wires=0)\n qml.RX(b, wires=1)\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n assert tape.trainable_params == [0, 1]\n\n def cost(a, b):\n # An explicit call to _update() is required here to update the\n # trainable parameters in between tape executions.\n # This is different from how the autograd interface works.\n # Unless the update is issued, the validation check related to the\n # number of provided parameters fails in the tape: (len(params) !=\n # required_length) and the tape produces incorrect results.\n tape._update()\n new_tape = tape.bind_new_parameters([a, b], [0, 1])\n return execute([new_tape], dev, **execute_kwargs)[0]\n\n jac_fn = jax.jit(jax.grad(cost))\n jac = jac_fn(a, b)\n\n a = jax.numpy.array(0.54)\n b = jax.numpy.array(0.8)\n\n # check that the cost function continues to depend on the\n # values of the parameters for subsequent calls\n res2 = cost(2 * a, b)\n expected = [np.cos(2 * a)]\n assert np.allclose(res2, expected, atol=tol, rtol=0)\n\n jac_fn = jax.jit(jax.grad(lambda a, b: cost(2 * a, b)))\n jac = jac_fn(a, b)\n expected = -2 * np.sin(2 * a)\n assert np.allclose(jac, expected, atol=tol, rtol=0)", "def create_memory_qlearner(\n dqn: nn.Module, # Callable[[Observation], QValues]\n random_action: Callable[[Observation], Action],\n optimizer: optim.Optimizer,\n discount: float = 0.99,\n epsilon: Union[float, num.Stepable] = 0.05,\n evaluation_mode: trainers.QLearningMode = trainers.QLearningMode.DOUBLE,\n optimizing_steps: int = 4,\n double_target_weight_copy_steps: int = 1000,\n memory_capacity: int = 10000,\n batch_size: int = 32,\n clip_grad_norm: Optional[float] = None,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n) -> Explorer:\n # Enable converting from string\n evaluation_mode = trainers.QLearningMode(evaluation_mode)\n dqn.to(device=device, dtype=dtype)\n if evaluation_mode == trainers.QLearningMode.SIMPLE:\n target_dqn = None\n else:\n target_dqn = copy.deepcopy(dqn)\n\n def select_action(engine, observation):\n \"\"\"Epsilon greedy action selection.\"\"\"\n with torch.no_grad():\n dqn.eval()\n if torch.rand(1).item() < epsilon:\n return random_action(observation)\n else:\n return dqn(observation).greedy()\n\n agent = Explorer(select_action=select_action, dtype=dtype, device=device)\n trainer = trainers.create_qlearning_trainer(\n dqn=dqn,\n target_dqn=target_dqn,\n optimizer=optimizer,\n discount=discount,\n evaluation_mode=evaluation_mode,\n clip_grad_norm=clip_grad_norm,\n dtype=dtype,\n device=device,\n )\n\n @agent.on(Events.STARTED)\n def add_memory_and_trainer_to_agent(engine):\n engine.state.memory = MemoryReplay(\n T.PinIfCuda(device=device), capacity=memory_capacity\n )\n engine.state.trainer = trainer\n\n @agent.on(Events.ITERATION_COMPLETED)\n def append_transition_and_step_epsilon(engine):\n engine.state.memory.append(engine.state.transition.cpu())\n if isinstance(epsilon, num.Stepable):\n epsilon.step()\n\n @agent.on(Events.ITERATION_COMPLETED)\n @utils.every(optimizing_steps)\n def 
optimize(engine):\n sample_elem = engine.state.memory[0]\n dataloader = DataLoader(\n dataset=engine.state.memory,\n batch_size=batch_size,\n collate_fn=sample_elem.__class__.collate,\n shuffle=True,\n drop_last=True,\n )\n engine.state.trainer.run(dataloader)\n\n @agent.on(Events.ITERATION_COMPLETED)\n @utils.every(double_target_weight_copy_steps)\n def copy_weights(engine):\n if target_dqn is not None:\n dqn.zero_grad() # Avoid copying the gradients\n target_dqn.load_state_dict(copy.deepcopy(dqn.state_dict()))\n\n return agent", "def update_q_values(self, state, value):\n if self.prev_state is not None and self.learning:\n reward = self.reward(Game.game_state(state))\n self.q_values[self.represent_state(self.prev_state), self.prev_action] += self.alpha * (\n reward + self.gamma * value - self.prev_q_val)", "def _build_algorithm(self):\n self.optimizer = tf.train.AdamOptimizer(self._lr, epsilon=1.5e-8)\n trainable_variables = tf.trainable_variables(\"main/qnet\")\n\n # Compute the state value.\n batch_size = tf.shape(self._observation)[0]\n action_index = tf.stack([tf.range(batch_size), self._action], axis=1)\n action_q = tf.gather_nd(self._qvals, action_index)\n assert_shape(action_q, [None])\n\n # Compute back up.\n ave_q = tf.add_n(self._target_qvals) / self._n_net\n assert_shape(tf.reduce_max(ave_q, axis=1), [None])\n q_backup = tf.stop_gradient(self._reward + self._discount * (1 - self._done) * tf.reduce_max(ave_q, axis=1))\n\n # Compute loss and optimize the object.\n loss = tf.reduce_mean(tf.squared_difference(q_backup, action_q)) # 损失值。\n self._train_op = self.optimizer.minimize(loss, var_list=trainable_variables)\n\n # Update target network.\n update_target_operation = []\n for i in reversed(range(1, self._n_net)): # i=0表示最近的模型。\n with tf.control_dependencies(update_target_operation):\n update_target_operation.append(self._update_target(f\"target_{i}/qnet\", f\"target_{i-1}/qnet\"))\n\n with tf.control_dependencies(update_target_operation):\n update_target_operation.append(self._update_target(\"target_0/qnet\", \"main/qnet\"))\n\n self.update_target_op = update_target_operation\n self._log_op = {\"loss\": loss}", "def get_ddqn_targets(qsa_target, q_targets, mask, estimator, next_states):\n with torch.no_grad():\n next_q_values = estimator(next_states)\n argmax_actions = next_q_values.max(1, keepdim=True)[1]\n qsa_target[mask] = q_targets.gather(1, argmax_actions)\n return qsa_target", "def ipd(length,gamma1,epsilon1,gamma2,epsilon2):\r\n #possible previous states (what each did in the last iteration)\r\n states = [(\"*\",\"*\"),(\"C\",\"D\"), (\"C\",\"C\"), (\"D\",\"C\"), (\"D\",\"D\")]\r\n #actions: Defect or Cooperate\r\n actions = [\"D\",\"C\"]\r\n #payoff matrix (as dict)\r\n payoff = {(\"C\",\"D\"): (-3,0), (\"C\",\"C\"): (-1,-1), \r\n (\"D\",\"C\"): (0,-3), (\"D\",\"D\"): (-2,-2)}\r\n #initialize learners \r\n q1 = qLearn(states,actions,gamma1,epsilon1)\r\n q2 = qLearn(states,actions,gamma2,epsilon2)\r\n #initialize list of rewards\r\n rewards = [] \r\n #iterate through length states and run the game\r\n prevState = (\"*\",\"*\")\r\n for i in range(length):\r\n #get actions\r\n #print(\"Iteration %i:\" %i)\r\n #print(\"Previous State:\", prevState)\r\n qa1 = q1.chooseAction(prevState)\r\n qa2 = q2.chooseAction(prevState)\r\n #print(\"Player 1 Action:\",qa1)\r\n #print(\"Player 2 Action:\",qa2)\r\n \r\n #find payoff\r\n newState = (qa1,qa2)\r\n reward = payoff[newState]\r\n rewards.append(sum(reward))\r\n #print(\"Player 1 Reward:\", reward[0])\r\n #print(\"Player 2 
Rewards:\", reward[1])\r\n #assign reward and update Q params\r\n q1.qUpdate(prevState,qa1,reward[0],newState)\r\n q2.qUpdate(prevState,qa2,reward[1],newState)\r\n \r\n prevState = newState\r\n #print(q1.Q)\r\n #print(q2.Q)\r\n return(rewards)", "def update_actor(self, states, dQda):\r\n dQda = np.concatenate(dQda)\r\n dQda = np.reshape(dQda, (-1, self.action_space))\r\n states = np.concatenate(states)\r\n states = np.reshape(states, (-1, self.feature_number))\r\n self.sess.run(self.optimize_actor,\r\n feed_dict={self.actor_input: states, self.dQda_placeholder: dQda})", "def getQValue(self, state, action):\n #print \"getQValue in ApproximateQAgent\"\n\n \"*** YOUR CODE HERE ***\"\n weights = self.getWeights()\n features = self.featExtractor.getFeatures(state, action, self)\n\n value = 0\n\n #print \"FEATURES: \", features\n #print \"WEIGHTS: \", weights\n\n for feature in features:\n value += features[feature]*weights[feature]\n return value\n #util.raiseNotDefined()" ]
[ "0.6463897", "0.6316078", "0.61735415", "0.61302334", "0.611619", "0.6111125", "0.60791606", "0.605653", "0.60473", "0.60191995", "0.6005732", "0.59470886", "0.59439164", "0.59313065", "0.5930287", "0.59225607", "0.5906226", "0.5905362", "0.5883166", "0.58092326", "0.5794577", "0.57814616", "0.5764787", "0.5735027", "0.5714025", "0.5697961", "0.5697287", "0.5676409", "0.56667167", "0.56648624", "0.5647522", "0.56373084", "0.56269", "0.5622852", "0.5609541", "0.55969876", "0.5583897", "0.55737597", "0.55731523", "0.55671585", "0.55615395", "0.5559826", "0.5557424", "0.5553451", "0.5547945", "0.5541026", "0.5537491", "0.5533138", "0.55308527", "0.55295795", "0.5528878", "0.55255646", "0.55251247", "0.55135", "0.5513403", "0.55125594", "0.5512448", "0.55066335", "0.55017734", "0.54934484", "0.5491572", "0.54837143", "0.5480122", "0.5460613", "0.54601735", "0.54581195", "0.54527646", "0.54433817", "0.544299", "0.54318607", "0.54318607", "0.53919744", "0.53877425", "0.5380055", "0.5379854", "0.53721935", "0.53720367", "0.5367768", "0.53674436", "0.5366636", "0.5363867", "0.53591466", "0.5358599", "0.5358414", "0.53582484", "0.53422755", "0.5334783", "0.5326983", "0.5317556", "0.53119236", "0.52975774", "0.52851826", "0.52814686", "0.527422", "0.5271555", "0.5267203", "0.52659696", "0.5265114", "0.5254048", "0.52491885", "0.5248761" ]
0.0
-1
Sending data through a queue
def send_message(self, data):
    self.agent_msg_queue.put(data)
    self._send_counter += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_msg(self, my_queue, my_msg):", "def send_data(queue, data):\n for obj in data:\n queue.put(dumps(obj, protocol=-1))", "def add_to_send_queue(self, data):\n if self.socket is not None:\n self.send_queue.put(data)", "def send_message(data):\n if data is not None:\n logging.debug(data)\n queue.on_next(data)", "def send(self, item):\n self.input_queue.put(item)", "async def sender(self):\n out = await self.output_queue.get()\n if not out.ready():\n logger.info(\">>> Requeuing {}\".format(out))\n await self.output_queue.put(out)\n await asyncio.sleep(0.05)\n return\n if out.expired():\n logger.info(\">>> Discarding {}\".format(out))\n out.discarded = True\n return\n content = [out.content] if type(out.content) is str else out.content\n logger.info(\">>> Sending:\\n{}\".format(content))\n await self.websocket.send(json.dumps(content))\n out.sent = True\n await asyncio.sleep(len(content) * 0.5)", "def send_queue(command, data, port=9755):\n qdata = {'type': command, 'data': data}\n qstr = json.dumps(qdata).encode('utf8')\n if len(qstr) > MAX_SIZE:\n raise RuntimeError('Data portion too large')\n\n csocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n csocket.connect(('127.0.0.1', port))\n csocket.send(qstr)\n csocket.close()\n return", "def set_queue_data(data):\n while(not grove_queue.empty):\n grove_queue.get()\n grove_queue.put(data)", "def send(self, data: typing.Any):\n try:\n self._event_queue.put(data, block=False)\n except queue.Full as e:\n raise RuntimeError(\"Gateway queue is full - this should never happen!\") from e", "def _send(self, command, payload):\n self.work_queue_client.send(command, payload)", "def send_message(self, message):\n self.client.queue.put(message)", "def _start_send_to_queue(self):\n while True:\n message_to_send = str(self.send_message_queue.get())\n if self.verbose: print \"Sending\", message_to_send\n send_msg(self.TCPSock, message_to_send)\n # self.TCPSock.send(message_to_send)", "def queue(self,data):\n prefix = '%s:'%(str(len(data)))\n size = len(prefix) + len(data) + 1\n\n # large pieces of data incur significant overhead to copy\n # into a netstring\n if size > 4096:\n self.outbox.append(prefix)\n self.outbox.append(data)\n self.outbox.append(',')\n self.tcpbox.append(prefix)\n self.tcpbox.append(data)\n self.tcpbox.append(',')\n else:\n data = '%s%s,' % (prefix,data)\n self.outbox.append(data)\n self.tcpbox.append(data)", "def write_data(self,queue):\n raise NotImplementedError('Abstract method has not been implemented')", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def enqueue(self,e):", "def send(self, msg):\n self.house.PLM.send_queue.put( msg )", "def enqueue(self, data, flag='process'):\n self.Q['in'].put((data, flag))", "def _send(self) -> None:\n if not self.connected or now() < self.next_send:\n return\n self.next_send += self.poll_interval\n buff = []\n while self.outq:\n msg_id, tag, data = self.outq.popleft()\n buff.append(pickle.dumps((msg_id, tag, data)))\n if buff:\n stream = b\"\".join(buff)\n self.endpoint.sendall(stream)", "def put_data(self, id, data):\n self.msg_queue.put(data)", "async def queue(self, ctx):\n srv = self.get_server_dict(ctx.message.server.id)\n que = srv['queue']\n msg = self.format_song_display('▶', srv['song'][1], srv['song'][2], srv['song'][3])\n i = 1\n for item in que:\n line = self.format_song_display(i, item[1], item[2], item[3])\n i 
+= 1\n msg += line\n await ctx.bot.send_message(ctx.message.channel, msg)", "def runQueueEnqueue(self):\n raise NotImplementedError", "def enqueue(self, record):\r\n self.queue.put_nowait(record)", "def on_data(self, data):\n if data is not None:\n # Send the data to the parent process\n logging.debug('Received raw data : ' + str(data))\n self.mp_queue.put(data)", "def send(self,msg):\n try:\n if self.mutexCmd.tryLock(100):\n self.cmd.append(msg)\n self.mutexCmd.unlock()\n #print(\"ADD TO QUEUE: {}\".format(msg))\n else:\n print(\"WARN: cmd not added to queue\")\n except Exception as e:\n print(\"ERROR:Serial:send:\",e)\n self.ConnexionError.emit(True)", "def send(self, message):\n if isinstance(message, basestring):\n self.send_queue.put(message)\n else:\n self.send_queue.put(struct.pack(\"!B\", message.type_id) +\n message.pack())", "def _put(self, item, queue):", "def send(self, data: Union[ActionEvent, TurnEvent], compression=None):\n # pause_receive is irrelevant now\n # self._pause_receive.set()\n self._send_queue.append(data)\n # super(MastermindClientUDP, self).send(JSONSerializer.serialize(data), compression)\n # self._pause_receive.clear()\n return", "def queue_communication(self, session):\n\n # Here we can queue all communication to be sent to the Client\n # Examples follow...\n session['queue'].append(GetObjects())\n session['queue'].append(DeleteObjects())\n session['queue'].append(RpcExecute())\n session['queue'].append(GetDeviceInfo())", "def test_enqueue(self):\n dest = '/queue/foo'\n frame = Frame('MESSAGE', headers={'message-id': str(uuid.uuid4())}, body='some data')\n self.store.enqueue(dest, frame)\n \n assert self.store.has_frames(dest) == True\n assert self.store.size(dest) == 1", "def send(self, message):\n\t\tmessage_string = self.send_address + \" \" + message + \" /\"\n\t\tself.add_to_queue(message_string)", "def push(self, *args, **kwargs):\n self.queue.put((args, kwargs))", "def _send(self, message):\r\n if not message:\r\n return\r\n\r\n self._maybe_print('twitch out queued: ' + message)\r\n self.buffer.append(message + \"\\n\")", "def toQueue(data):\n\n for host in settings.OTHER_HOSTS:\n settings.SENDER[host['id']].queue.put(dict(**data))", "def to_server(self, o):\n assert type(o) == str\n\n # add to queue\n self.toserverqueue.put(o, block=False)\n\n # send now, if appropriate\n if self.buffer_tx==False:\n self.periodicTimer.fireNow()", "def __init__(self):\n self.data = Queue()", "def send(self, data):", "def send_message(self, message):\n self.send_message_queue.put(message)", "def enQueue(self, data_):\n\t\tif self.isFull():\n\t\t\tprint(\"Can't insert the data in the queue: Queue Full\")\n\t\t\texit(1)\n\n\t\t## This enqueuing logic using the concept of circular\n\t\t## movement to avoid the overhead of the transfer\n\n\t\tself.rear = (self.rear + 1) % self.capacity\n\t\tself.queue[self.rear] = data_\n\t\tself.size = self.size + 1", "def putonqueue(self, nr, *args):\n self.outqueues[10-nr].put_nowait(args)\n self.tickqueue.put_nowait('go')", "def enqueue(self, name):\n pass", "def creator(data, q):\n print('Creating data and putting it on the queue')\n for item in data:\n q.put(item)", "def putonqueue(self, nr, *args):\n\n self.outqueues[nr].put_nowait(*args)\n self.tickqueue.put_nowait('go')", "def sendall(self, message):\n reg = re.compile(':::(\\d+):::')\n match = reg.match(message)\n self.test.assertIsNotNone(match)\n offset = len(match.group(0))\n\n try:\n size = int(match.group(1))\n except ValueError:\n size = None\n\n message = 
message[offset:]\n message = pickle.loads(message)\n self.queue.put((size, message))", "def sendData(self):\n\n while self.keep_running:\n self.connection = pika.BlockingConnection(self.params)\n self.channel = self.connection.channel()\n\n # The fanout exchange broadcasts all the messages it receives to all the queues it knows.\n # That is what we need for our logger.\n self.channel.exchange_declare(exchange=self.logName,\n exchange_type='fanout')\n\n # Publish the data to the exchange\n self.channel.basic_publish(exchange=self.logName,\n routing_key='',\n body=self.message)\n\n self.connection.close()\n\n time.sleep(self.loopTime)", "def run(self):\n\n def callback(ch, method, properties, body):\n json_body = json.loads(body)\n self.buffer.append(Fvalue.fromdict(json_body))\n\n sleep(5) # We introduce a slight delay to let the RabbitMQ container to accept connections\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.mq_host,port=self.mq_port))\n channel = connection.channel()\n channel.exchange_declare(exchange=self.mq_host + '_exchange', exchange_type='direct')\n result = channel.queue_declare(exclusive=True)\n queue_name = result.method.queue\n channel.queue_bind(exchange=self.mq_host + '_exchange',\n queue=queue_name,\n routing_key=self.routing_key)\n channel.basic_consume(callback,queue=queue_name,no_ack=True)\n channel.start_consuming()", "def _send(self, message):\n logger.info(message)\n self.buffer.put(message)", "def _get_data_from_send_queue(self):\n size = self.send_queue.qsize()\n if size > 1:\n data = b''.join([self.send_queue.get() for _ in range(size)])\n else:\n data = self.send_queue.get()\n return data", "def _client(self):\n while True:\n body = self.queue.get(True)\n print \"Sending %s bytes (%s/%s)\" % (len(body), self.queue.qsize(), self.queue.maxsize)\n\n try:\n req = urllib2.Request(self.endpoint, body)\n urllib2.urlopen(req).read()\n except:\n print \"Cannot send request. 
Retrying in 5 seconds\"\n print_exception(*sys.exc_info())\n print \"continuing...\"\n self.enqueue(body)\n sleep(5)", "def enqueue(self,value):\n pass", "def testQueueSend(self):\n self.mgr.queueMsg(37)\n self.assertTrue( self.mgr.msgQueue.empty() )\n self.v.send_mavlink.assert_called_with(37)", "def _update(self, data):\n if len(data) > 0:\n for q in self._queues.values():\n q.put(data)", "def send_message(socket, queue_lock, message_queue):\n while not exit_flag: \n queue_lock.acquire() # do we need this because it is the only thread accessing the queue?\n if not message_queue.empty():\n message = message_queue.get()\n socket.send(json.dumps(message))\n queue_lock.release()", "def example(example_object, queue):\n queue.put(example_object)", "async def send(self):", "def process_queue(self):\n while not self.msg_queue.empty():\n addr, msg = self.msg_queue.get()\n if msg:\n print(msg)\n self.broadcast(addr, msg)\n else:\n self.clean(addr)", "def enqueue(self, item):\n self.queue.append(item)", "def enqueue(self, item):\n self.queue.append(item)", "def send_message_to_server(self, key, value):\n if self.from_kivy_queue is None:\n return\n self.from_kivy_queue.put((key, value))", "def controls():\n\n context = zmq.Context()\n\n print(\"Transmitting commands to process.\")\n socket = context.socket(zmq.REQ)\n rc = socket.connect(\"ipc:///tmp/mail_queue_ipc\")\n #print(rc)\n\n\n for request in range(2):\n print(\"Sending request %s\" % request)\n socket.send(b\"insert\")\n\n message = socket.recv()\n print(\"Recieved reply %s [ %s ]\" % (request, message))\n time.sleep(1)", "def enqueue(self, data):\n if data:\n self.chunks.append(data)\n self.chunks_size += len(data)", "def dequeue(self):", "def testQueueMsg(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(4)\n self.assertFalse( self.mgr.msgQueue.empty() )\n self.assertTrue(self.mgr.isGoproBusy)", "def _send_data(self):\n pass", "def example(example_object, queue_object):\n queue_object.put(example_object)", "def callback(self, data):\n\n self.connection = pika.BlockingConnection(self.params)\n self.channel = self.connection.channel()\n\n # The fanout exchange broadcasts all the messages it receives to all the queues it knows.\n # That is what we need for our logger.\n # Tony changed to 'topic' to work with Kuilin's group\n self.channel.exchange_declare(exchange=self.logName,\n exchange_type='topic',\n auto_delete=True)\n\n #TONY WAS HERE\n #CONVERT THE DATA BEFORE SENDING\n #this extracts the data to a tuple\n data_tuple = struct.unpack(\"<hddhdddddddddddd\", data)\n #convert tuple to string and remove the parentheses on the ends\n data_to_send = str(data_tuple).strip(\"()\")\n\n # Publish the data to the exchange\n self.channel.basic_publish(exchange=self.logName,\n routing_key=self.RoutingKey,\n body=data_to_send) #used to be body=data (from Pilot)\n\n #tony was here\n #print(\"Sending: %r via %r and %r\" % (data,self.logName,self.RoutingKey))\n\n self.connection.close()", "def send(self, data):\n self.sent.put(data)", "def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass", "def sendData(self):\n out = ''\n for line in self.sendq:\n line = 'put ' + line + self.tagstr\n out += line + '\\n'\n LOG.debug('SENDING: %s', line)\n\n if not out:\n 
LOG.debug('no data in sendq?')\n return\n\n try:\n if self.dryrun:\n print out\n else:\n self.cissd.sendall(out)\n self.sendq = []\n # If an exception occurs, try sending data again next time\n except socket.error, msg:\n LOG.error('failed to send data: %s', msg)\n try:\n self.cissd.close()\n except socket.error:\n pass\n self.cissd = None", "def add_to_queue(self, msg):\n if not self.queue.full():\n self.queue.put(msg)", "def put(self, conn):\r\n self.queue.append((conn, time.time()))", "def send(self, ard: Arduino_functions.Arduino, write_msg_str):\n self.worker_send.queue.put((ard, write_msg_str))\n\n # Trigger processing the worker_send queue.\n self.worker_send.qwc.wakeAll()", "def callback(address, queue, data):\n\t\t\tqueue.put(bytes('ACK', 'utf8'))\n\t\t\tself._server_in_buf.append(data)", "def _msg_server(self, message):\r\n \r\n # Add message to the outgoing queue.\r\n self._outgoinglock.acquire()\r\n self._outgoing.append(message)\r\n self._outgoinglock.release()", "def q_send(send, in_string):\n self.message_q.append(in_string)", "def send(self):\n \n # Check that we have something to send\n if len(self.items) > 0:\n \n # If no items 'sent' or 'playing', send next item in queue\n sent_items = [item for item in self.playlist_store.find({'status':'sent'})]\n playing_items = [item for item in self.playlist_store.find({'status':'playing'})]\n \n # Look for any expired items in playing\n expired = False\n for item in playing_items:\n end_date = item['start_date'] + datetime.timedelta(seconds=item['track']['track']['length'])\n expired = expired or end_date < datetime.datetime.now()\n \n # Assume we send nothing\n send_item = False\n # Conditions under which we send...\n # 1. Nothing sent, and nothing playing\n send_item = send_item or (len(sent_items) == 0 and len(playing_items) == 0)\n # 2. 
Nothing sent, and something expired marked as playing\n send_item = send_item or (len(sent_items) == 0 and len(playing_items) > 0 and expired)\n \n if send_item:\n \n # Send next item in queue\n self.current_item = self.items.pop(0)\n print \" [x] Sending %r\" % (self.current_item['track']['track']['name'],)\n \n # Send using the broadcast exchange (Pub/Sub)\n self.amqp_primary_channel.basic_publish(exchange=self.amqp_broadcast_exchange,\n routing_key='',\n body=json.dumps({'_id': str(self.current_item['_id']),\n 'track': self.current_item['track'],\n 'from': self.current_item['from']}),\n properties=pika.BasicProperties(\n content_type=\"application/json\",\n delivery_mode=2))\n \n # Mark item as sent\n self.current_item['status'] = 'sent'\n self.playlist_store.update({'_id': self.current_item['_id']}, self.current_item)\n \n elif len(sent_items) == 0 and len(playing_items) > 0 and not expired:\n # TODO\n # If something playing and nothing sent, set up timer\n # timer = Timer(self.current_item['track']['track']['length'], self.next)\n # timer.start()\n pass", "def __init__(self): \n self.queue = []", "def msg(self, target, message):\n self.server.message_queue.put(('tests!tests@tes.t', target, message))", "def sendCommand(self, command:str=\"?\"):\n self.commandQueue.put(command)\n #self.queueLock.release()\n pass", "async def _queue(self, msg):\n if msg.voice_client is not None:\n if msg.guild.id in self.player:\n if self.player[msg.guild.id]['queue']:\n emb = discord.Embed(\n colour=self.random_color, title='queue')\n emb.set_footer(\n text=f'Command used by {msg.author.name}', icon_url=msg.author.avatar_url)\n for i in self.player[msg.guild.id]['queue']:\n emb.add_field(\n name=f\"**{i['author'].author.name}**\", value=i['title'], inline=False)\n return await msg.send(embed=emb, delete_after=120)\n\n return await msg.send(\"No songs in queue\")", "def enqueue(self, val):\r\n self.queue.append(val)", "def __init__(self):\r\n self.queue = []", "def __init__(self):\r\n self.queue = []", "def enqueue(self, message):\n self.pending_messages += [message]", "def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2", "def add_to_queue(self, word):\n self.q.put(word)\n print(\"word \\'{}\\' added in clients queue\".format(word))", "def worker(self):\n while True:\n item,index = self.inbound.get()\n if index is None:\n self.buffer.append(item)\n self.index.value = self.index.value + 1 #index of next item for buffer\n if len(self.buffer)>self.size:\n del self.buffer[0]\n self.newitem.put(None)\n else:\n self.buffer[len(self.buffer)+(index - self.index.value)] = item", "def send_command_queue(command_queue):\n for command in command_queue:\n Game._send_string(command)\n\n Game._done_sending()", "def queue(self, queue_, value):\n while not self.closed:\n try:\n queue_.put(value, block=True, timeout=1)\n return\n except queue.Full:\n continue", "def enqueue(self, message, qat, nbf):\n dst = self.abspath('%s.tmp' % str(message.id))\n with open(dst, 'wb') as f:\n f.write(nbf.to_bytes(8, 'big'))\n f.write(message.encode())\n f.flush()\n os.fsync(f.fileno())\n\n os.rename(dst, self.abspath('%s.amqp' % str(message.id)))", "def send(self, s):\n try:\n next_msg = self.mq[s.fileno()].get_nowait()\n except QueueEmpty:\n #print >>sys.stderr, 'output queue for', s.getpeername(), 'is empty'\n # No messages waiting so stop checking for writability.\n self.outputs.remove(s)\n except SocketError:\n self.remove(s)\n else:\n 
self.log(\"Sending '%s' to %s\", repr(next_msg), self.conns[s].addr)\n s.send(next_msg)", "def _process_whisper_queue(self, whisper_queue):\n while True:\n if len(whisper_queue) > 0:\n whisper_tuple = (whisper_queue.pop())\n self.ts.send_whisper(whisper_tuple[0], whisper_tuple[1])\n time.sleep(.5)", "def send(self, task_msg, iterate=1):\n # Type casting; Task -> python dict\n task_msg = task_msg.__dict__\n task_msg['dispatcher_id'] = self.uid\n # Start your result manager and workers before you start your dispatchers\n for idx in range(iterate):\n task_msg['task_id'] = self.new_tid()\n task_msg['scheduled'] = str(time.time())\n logging.debug(\"msg pushed:{} to {}\".format(task_msg, self.q_name))\n self.mq_send(task_msg)", "def test_process_message_queue(self):\n t = threading.Thread(target=self.handle_message_queue)\n t.start()\n\n self.dut._process_message_queue()\n\n t.join()", "def send(self, msg):\r\n self.msgLock.acquire()\r\n self.msg.append(msg)\r\n self.numMsg += 1\r\n self.msgLock.release()", "def __send(self):\r\n self.msgLock.acquire()\r\n if self.numMsg > 0:\r\n self.socket.send(self.msg.pop(0))\r\n self.numMsg -= 1\r\n self.msgLock.release()", "def callback(address, queue, data):\n queue.put(bytes('ACK', 'utf8'))\n self._server_in_buf.append(data)", "def send_command_queue(self, command_queue):\n for command in command_queue:\n self._send_string(command)\n\n self._done_sending()", "def send_command_queue(self, command_queue):\n for command in command_queue:\n self._send_string(command)\n\n self._done_sending()", "def tryToSend( self, message ):\n if self.free:\n self.free = False\n yield self.writeToSerial( message )\n elif len( self.queue ) > MAX_QUEUE_SIZE:\n raise Exception( 'Queue size exceeded')\n else: self.queue.append( message )" ]
[ "0.7874383", "0.7698461", "0.7406423", "0.7342643", "0.7194812", "0.7130858", "0.7115332", "0.7114572", "0.7103772", "0.70748067", "0.70712423", "0.70644194", "0.69962597", "0.6987004", "0.6939428", "0.6920469", "0.6905456", "0.68996507", "0.68771124", "0.68152535", "0.6804161", "0.6798476", "0.67858934", "0.6743814", "0.6691898", "0.6686302", "0.66813517", "0.66733915", "0.6661136", "0.66597915", "0.6655142", "0.66488075", "0.66323704", "0.66264933", "0.66108197", "0.66042674", "0.66035336", "0.6581525", "0.6524583", "0.65127933", "0.65125334", "0.6505729", "0.65005374", "0.64909476", "0.64894044", "0.64887977", "0.64857703", "0.64756066", "0.6475163", "0.6474265", "0.64729905", "0.6464555", "0.64554965", "0.6443268", "0.64296055", "0.6422431", "0.64214355", "0.64214355", "0.6417666", "0.6403937", "0.63922465", "0.63765943", "0.6374857", "0.6373126", "0.6364249", "0.63630307", "0.63617986", "0.63576347", "0.63525623", "0.63470584", "0.63367593", "0.63274986", "0.6317991", "0.6315138", "0.63119054", "0.63115746", "0.6309293", "0.62987584", "0.6297779", "0.62947303", "0.6287086", "0.6284315", "0.6284315", "0.6281971", "0.6276024", "0.6274725", "0.62727094", "0.6271683", "0.6271446", "0.62617683", "0.62600756", "0.62574875", "0.6253519", "0.62522626", "0.62472403", "0.6246933", "0.6244116", "0.6241194", "0.6241194", "0.62342924" ]
0.6878409
18
Send detection data and return status
def send_detection_data(self, image_width, image_height, image, detection_result):
    if self._send_buffer.full() is True:
        log_error("Send detection data failed for buffer is full")
        return False

    image_data = None
    if isinstance(image, AclImage):
        image_data = DataBuf(image.data(), image.size).copy_to_local()
    elif isinstance(image, np.ndarray):
        image_data = image
    else:
        log_error("Invalid data to send")
        return False

    request_msg = pm.image_frame_request(image_width, image_height,
                                         image_data.tobytes(), detection_result)
    self.send_message(request_msg)
    self._send_buffer.put(image_data)
    self._release_send_success_data()
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_image(self, image_width, image_height, image):\n detection_result = []\n return self.send_detection_data(image_width, image_height, image, detection_result)", "def test_http_classifier(self):\n \n msg = \"\"\n \n files = 0\n tp = 0\n fp = 0\n tn = 0\n fn = 0\n\n self.addr = \"http://\" + self.Helpers.confs[\"cnn\"][\"api\"][\"server\"] + \\\n ':'+str(self.Helpers.confs[\"cnn\"][\"api\"][\"port\"]) + '/Inference'\n self.headers = {'content-type': 'image/jpeg'}\n\n for data in os.listdir(self.testing_dir):\n if os.path.splitext(data)[1] in self.valid:\n \n response = self.send_request(self.testing_dir + \"/\" + data)\n\n msg = \"\"\n if response[\"Diagnosis\"] == \"Positive\" and \"_1.\" in data:\n tp += 1\n msg = \"Acute Lymphoblastic Leukemia correctly detected (True Positive)\"\n elif response[\"Diagnosis\"] == \"Positive\" and \"_0.\" in data:\n fp += 1\n msg = \"Acute Lymphoblastic Leukemia incorrectly detected (False Positive)\"\n elif response[\"Diagnosis\"] == \"Negative\" and \"_0.\" in data:\n tn += 1\n msg = \"Acute Lymphoblastic Leukemia correctly not detected (True Negative)\"\n elif response[\"Diagnosis\"] == \"Negative\" and \"_1.\" in data:\n fn += 1\n msg = \"Acute Lymphoblastic Leukemia incorrectly not detected (False Negative)\" \n \n files += 1\n \n self.Helpers.logger.info(msg)\n print()\n time.sleep(7)\n \n self.Helpers.logger.info(\"Images Classifier: \" + str(files))\n self.Helpers.logger.info(\"True Positives: \" + str(tp))\n self.Helpers.logger.info(\"False Positives: \" + str(fp))\n self.Helpers.logger.info(\"True Negatives: \" + str(tn))\n self.Helpers.logger.info(\"False Negatives: \" + str(fn))", "def sendDetection(self, idData, classes, aux=None):\n self.dp.URL = self.URL\n self.dp.sendDetection(classifier=self.Config[\"MACHINE_NAME\"], \n idData=idData, classes=classes, aux=aux)", "def run(self):\n if self.stream:\n while True:\n try:\n ret, frame = self.stream.read()\n if ret is True:\n # TODO: replace by a real function that send frame to detection model\n self.detection_model.send_image(image=frame)\n if self.show_in_window:\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n except KeyboardInterrupt:\n self.stream.release()\n cv2.destroyAllWindows()\n self.log.close()\n return None\n except Exception as e:\n self.stream.release()\n cv2.destroyAllWindows()\n self.log.write('Error:Unexpected Error happened:\\n {}'.format(e))\n self.log.close()\n return None\n else:\n self.log.write(\"Error initializing stream....\\n\")\n self.log.close()\n return None", "def status_check_callback(self, req, res):\n try:\n res.single_camera_status = 1\n res.stereo_camera_status = 1\n res.lidar_status = 1\n if self.camera_buffer.read_buffer is not None \\\n and isinstance(self.camera_buffer.read_buffer, list):\n if len(self.camera_buffer.read_buffer) == 2:\n res.stereo_camera_status = 0\n elif len(self.camera_buffer.read_buffer) == 1:\n res.single_camera_status = 0\n if self.lidar_buffer.read_buffer is not None:\n res.lidar_status = 0\n return res\n except Exception as ex:\n self.get_logger().error(f\"Failed to get sensor data status: {ex}\")", "def image_test_case(img, expected_results, info_string):\n global passed_count, failed_count\n\n path = TEST_IMGS + img\n\n print(\"\\n\\nTEST: {}\".format(info_string))\n print(\"\\nTesting image handling of {}\".format(path))\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.connect((HOST, PORT))\n\n with open(path, 
'rb') as f:\n img_bytes = f.read()\n\n sock.send(START)\n sock.send(GPS)\n sock.send(b'51.5138')\n sock.send(LONG)\n sock.send(b'-0.09847899999999754')\n sock.send(SOF)\n sock.send(img_bytes)\n sock.send(END_MESSAGE)\n\n response_1 = sock.recv(4)\n response_2 = sock.recv(4)\n responses = [response_1, response_2]\n\n for expected in expected_results:\n if expected not in responses:\n print(\"\\n\\tResult: FAILED.\")\n print(\"Expected server response {}. Received {}.\".format(\n expected_results, responses))\n failed_count += 1\n return\n\n print(\"\\n\\tResult: PASSED.\\n\")\n passed_count += 1", "def getStatus():\n return json.dumps({'camera': Camera.status(), 'rover': rover.status()}), 200", "def run(self):\n # Wait for the 'shot' message ready\n self.wait_for_messages()\n # Send the initial states to the server\n self.send_shape_and_states()\n # Wait for the 'method' message ready\n self.wait_for_messages()\n\n # Send the measurement angles to the server\n for y in range(self.__depth):\n self.send_angle_bulks(y)\n\n # Obtain the measurement outcomes\n result = self.get_classical_output()[::-1]\n self.send_back(\n 'local',\n self.__wrap_shot_message(\n 'setResult',\n {'result': result, 'shot': self.__shots},\n )\n )", "async def detect(self, request: Request) -> Response:\n raw_data = await request.body()\n as_str = raw_data.decode(\"utf-8\")\n\n try:\n body = orjson.loads(as_str)\n except orjson.JSONDecodeError as e:\n raise InferenceError(\"Unrecognized request format: %s\" % e)\n\n request_handler = get_request_handler(\n Protocol(self.alibi_detect_settings.protocol), body\n )\n request_handler.validate()\n input_data = request_handler.extract_request()\n\n y = await self.predict_fn(input_data)\n output_data = orjson.dumps(y, option=orjson.OPT_SERIALIZE_NUMPY)\n\n return Response(content=output_data, media_type=\"application/json\")", "def process_payload(payload):\n\n # Convertion of payload string to image array for opencv\n ret, img = make_image(payload)#ret is 0 when conversion is successful or 1 when not\n result='Unable to detect'\n if ret == 0:\n cv2.imwrite('received.png', img)\n try:\n roi = extract_roi_2(img)\n \n result = detect(roi) \n \n #write_characters(roi)\n\n except:\n result = \"----------------\"\n # # When roi is extracted its a 2d array \n \n return result", "def video_test():\n r = request\n # convert string of image data to uint8\n nparr = np.fromstring(r.data, np.uint8)\n # decode image\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n # do some fancy processing here....\n\n # build a response dict to send back to client\n response = {'message': 'image received. 
size={}x{}'.format(img.shape[1], img.shape[0])\n }\n print(response)\n # encode response using jsonpickle\n response_pickled = jsonpickle.encode(response)\n cv2.imwrite(\"1.jpg\", img)\n print(\"done\")\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")", "def process_image(self, data):\n try:\n\n # Convert the image from ROS format to OpenCV format\n # 'bgr8' means it will encode as 8-bit values in BGR channels\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n # Apply a threshold to your image\n cv_image = self.bound_green_object(cv_image)\n # Display the modified image\n cv2.imshow('picture', cv_image)\n cv2.waitKey(3)\n except CvBridgeError, e:\n rospy.loginfo(e)", "def serve_inference_requests():\n global image_queue\n\n with tf.Session() as sess:\n while True:\n image_data = image_queue.get()\n\n tensor = sess.graph.get_tensor_by_name('final_result:0')\n predictions = sess.run(tensor, {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n top_k = predictions.argsort()[-NUM_PREDICTIONS:][::-1]\n\n human_string = labels[top_k[0]]\n score = predictions[top_k[0]]\n logging.info('%s classified with score %.5f', human_string, score)\n\n emit_image = False\n if human_string != 'nothing':\n emit_image = True\n logging.debug('emitting image cause %s was detected', human_string)\n elif score <= config['inference']['threshold']:\n emit_image = True\n logging.debug('emitting image cause score %.5f is below threshold of %s',\n score, config['inference']['threshold'])\n else:\n logging.debug('image not emitted, cause nothing was detected with a probability of %.5f',\n score)\n\n if emit_image:\n mqtt_publish(image_data)\n else:\n save_image(image_data)", "def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n rospy.loginfo(\"image info: {}\".format(image.numpy().shape))\n\n # Run pose estimation\n boxes = self.object_detector.infer(image, threshold=0.1, keep_size=False)\n\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n\n # Convert detected boxes to ROS type and publish\n ros_boxes = self.bridge.to_ros_boxes(boxes)\n if self.bbox_publisher is not None:\n self.bbox_publisher.publish(ros_boxes)\n rospy.loginfo(\"Published face boxes\")\n\n # Annotate image and publish result\n # NOTE: converting back to OpenDR BoundingBoxList is unnecessary here,\n # only used to test the corresponding bridge methods\n odr_boxes = self.bridge.from_ros_boxes(ros_boxes)\n image = draw_bounding_boxes(image, odr_boxes, class_names=self.class_names)\n if self.image_publisher is not None:\n message = self.bridge.to_ros_image(np.uint8(image))\n self.image_publisher.publish(message)\n rospy.loginfo(\"Published annotated image\")", "def send(self, data, status=\"CON\"):\n return self.c.sendall(pack(data, status=status))", "def _send(self):\n executor_id = self.status['executor_id']\n job_id = self.status['job_id']\n call_id = self.status['call_id']\n act_id = self.status['activation_id']\n\n if self.status['type'] == '__init__':\n init_key = create_init_key(executor_id, job_id, call_id, act_id)\n self.internal_storage.put_data(init_key, '')\n\n elif self.status['type'] == '__end__':\n status_key = create_status_key(executor_id, job_id, call_id)\n dmpd_response_status = json.dumps(self.status)\n drs = sizeof_fmt(len(dmpd_response_status))\n logger.info(\"Storing execution stats - Size: {}\".format(drs))\n self.internal_storage.put_data(status_key, 
dmpd_response_status)", "def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n self.ID = self.ID + 1\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n name = str(f\"{self.ID:02d}\"+\"_single.jpg\")\n cv2.imwrite(os.path.join(self.args.path_in, name), image)\n\n if (self.ID == 5):\n # Run SyntheticDataGeneration\n self.synthetic.eval()\n self.ID = 0\n # Annotate image and publish results\n current_directory_path = os.path.join(self.args.save_path, str(\"/Documents_orig/\"))\n for file in os.listdir(current_directory_path):\n name, ext = os.path.splitext(file)\n if ext == \".jpg\":\n image_file_savepath = os.path.join(current_directory_path, file)\n cv_image = cv2.imread(image_file_savepath)\n cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)\n if self.image_publisher is not None:\n image = Image(np.array(cv_image, dtype=np.uint8))\n message = self.bridge.to_ros_image(image, encoding=\"bgr8\")\n self.image_publisher.publish(message)\n for f in os.listdir(self.args.path_in):\n os.remove(os.path.join(self.args.path_in, f))", "def ping():\n health = AutoGluonClassifierService.load_model() is not None # You can insert a health check here\n\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')", "def remote_status():", "def detect(self):\n # process the input video and get the attributes:\n self.process_video()\n\n # build a rcnn/ yolov5 predictor:\n self.build_predictor()\n\n \n # assert not os.path.isfile(args.output_file), \"File with the name %s already exists\"%args.output_file\n # build the writer with same attributes:\n self.vid_writer = cv2.VideoWriter(self.output, self.fourcc, self.fps, (self.w, self.h))\n\n # inference time:\n start = time.time()\n print(\"Started inference\\n\")\n \n # progress bar using tqdm:\n pbar = tqdm(total=self.nframes)\n\n while(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret == False:\n break # when the last frame is read \n\n # different formats of results:\n if self.library == \"yolov5\":\n # predict and bring the outputs to cpu:\n results = self.predictor(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) # convert to RGB\n predictions = results.xyxy[0].cpu()\n # find the instance indices with person:\n person_idx = predictions[:,5] == self.label_dict[\"person\"]\n # extract the corresponding boxes and scores:\n boxes = predictions[person_idx,:4].numpy()\n probs = predictions[person_idx,4].numpy()\n\n if self.library == \"detectron2\":\n # predict and bring the outputs to cpu:\n results = self.predictor(frame) # RGB conversion done automatically in detectron\n predictions = results[\"instances\"].to(\"cpu\")\n # find the instance indices with person:\n person_idx = [predictions.pred_classes == self.label_dict[\"person\"]]\n # extract the corresponding boxes and scores:\n boxes = predictions.pred_boxes[person_idx].tensor.numpy()\n probs = predictions.scores[person_idx].numpy()\n\n # draw boxes and write the frame to the video:\n if len(boxes): # check whether there are predictions\n box_frame = self.draw_person_boxes(frame, boxes, probs)\n else:\n box_frame = frame\n self.vid_writer.write(box_frame)\n\n pbar.update(1)\n pbar.close()\n\n # release the video capture object and write object:\n self.cap.release()\n self.vid_writer.release()\n\n print(\"Inferene on the video file took %0.3f seconds\"%(time.time()-start))", "def main_recognition():\n if request.method == 'POST':\n # print(request.url)\n # 
stream = BytesIO(request.data)\n # image = Image.open(stream).convert(\"RGBA\")\n # path = 'C:/Users/13/Documents/FRS_v1/path.png'\n # image = image.save(path)\n # stream.close()\n #df = faces_info_export(path)\n print(request.url)\n stream = BytesIO(request.data)\n img_pil=Image.open(stream).convert(\"RGB\")\n stream.close()\n img_cv=np.array(img_pil)\n try:\n df = faces_info_export(img_cv)\n return df.to_json(orient='index')\n except SystemError as er:\n \tprint(er)\n \treturn json.dumps({'msg':'error'})\n except AttributeError as er:\n \tprint(er)\n \treturn json.dumps({'msg':'error'})\n if request.method == 'GET':\n # ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\n df = faces_info_export(\"C:/Users/13/Documents/FRS_v1/test_image.jpg\")\n return df.to_json(orient='index')", "def detect_image_client(img):\n rospy.wait_for_service('detect_service') # attendende che il servizio sia pronto\n rospy.loginfo(\"Detection service invoked\")\n try:\n detect_service = rospy.ServiceProxy('detect_service', Detect) #istanzia il proxy al servizio detect_service\n msg = detect_service(img) # invoca il servizio con un'istanza di Image per ottenere un'istanza di DetectResponse\n return msg.det # restituisce l'istanza di tipo Detection2DArray prelevandola dall'oggetto DetectResponse\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\"%e)", "def detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, submit_dir)\n os.makedirs(submit_dir)\n\n # Read dataset\n dataset = TamperDataset()\n dataset.load_tamper(dataset_dir, subset)\n dataset.prepare()\n # Load over images\n submission = []\n f1 = 0\n print(len(dataset.image_ids))\n # for image_id in dataset.image_ids:\n # # Load image and run detection\n # image = dataset.load_image(image_id)\n # # Detect objects\n # r = model.detect([image], verbose=0)[0]\n\n # # Encode image to RLE. Returns a string of multiple lines\n # source_id = dataset.image_info[image_id][\"id\"]\n # rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n # submission.append(rle)\n # # Save image with masks\n\n # N = r[\"scores\"].shape[0]\n # if not N:\n # \tH, W, C = image.shape\n # \tmask = np.zeros((H,W))\n\n \t\n # else:\n\n # H, W, C = image.shape\n\n # idx = np.argsort(-r[\"scores\"])\n # mask = r[\"masks\"][:,:,idx[0]].astype(np.float32)\n\n # bbox = r[\"rois\"][idx[0], :4]\n\n # y1, x1, y2, x2 = bbox\n\n\n\n # mask = dense_crf(image, mask)\n\n # mask = np.where(mask >= 0.5, 255, 0)\n\n # H, W, C = image.shape\n\n # full_mask = np.zeros((H, W))\n # full_mask[y1:y2, x1:x2] = mask\n\n for image_id in dataset.image_ids:\n # Load image and run detection\n image = dataset.load_image(image_id)\n # ela=dataset.load_ela(image_id)\n # Detect objects\n # r = model.detect([image],[ela], verbose=0)[0]\n r = model.detect([image],verbose=0)[0]\n\n # Encode image to RLE. 
Returns a string of multiple lines\n source_id = dataset.image_info[image_id][\"id\"]\n rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n submission.append(rle)\n # Save image with masks\n\n N = r[\"scores\"].shape[0]\n if not N:\n H, W, C = image.shape\n mask = np.zeros((H,W))\n\n \n else:\n idx = np.argsort(-r[\"scores\"])\n mask = r[\"masks\"][:,:,idx[0]].astype(np.uint8)\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n\n annotation = dataset.load_annaation(image_id)\n annotation = np.where(annotation >= 0.5, 1, 0) \n f = get_FM(mask, annotation)\n f1 += f\n\n print(f1/len(dataset.image_ids))\n\n\n\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n # visualize.display_instances(\n # image, r['rois'], r['masks'], r['class_ids'],\n # dataset.class_names, r['scores'],\n # show_bbox=False, show_mask=False,\n # title=\"Predictions\")\n # plt.savefig(\"{}/{}.png\".format(submit_dir, dataset.image_info[image_id][\"id\"]))\n\n # Save to csv file\n # submission = \"ImageId,EncodedPixels\\n\" + \"\\n\".join(submission)\n # file_path = os.path.join(submit_dir, \"submit.csv\")\n # with open(file_path, \"w\") as f:\n # f.write(submission)\n print(\"Saved to \", submit_dir)", "def testSendState(self):\n self.mgr.enabled = 1\n self.mgr.model = MODEL_HERO3PLUS_BLACK\n self.mgr.status = STATUS_GOPRO_CONNECTED\n self.mgr.isRecording = False\n self.mgr.captureMode = CAPTURE_MODE_VIDEO\n self.mgr.videoFormat = VIDEO_FORMAT_NTSC\n self.mgr.videoResolution = 3\n self.mgr.videoFrameRate = 1\n self.mgr.videoFieldOfView = 2\n self.mgr.videoLowLight = True\n self.mgr.photoResolution = 1\n self.mgr.photoBurstRate = 2\n self.mgr.videoProtune = True\n self.mgr.videoProtuneWhiteBalance = 2\n self.mgr.videoProtuneColor = 1\n self.mgr.videoProtuneGain = 3\n self.mgr.videoProtuneSharpness = 2\n self.mgr.videoProtuneExposure = 1\n\n # Send old spec version\n # 2 unsigned shorts for a header, 26 unsigned bytes, then 5 unsigned shorts\n pkt1 = struct.pack('<IIBBBBBBBBBBBBBBBBBBBBBBBBBBHHHHH', app_packet.GOPRO_V1_STATE, 36, \\\n GOPRO_V1_SPEC_VERSION,\n self.mgr.model,\n self.mgr.status,\n self.mgr.isRecording,\n self.mgr.captureMode,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0\n )\n\n # send new spec version\n # 2 unsigned shorts for a header, 26 unsigned bytes, then 5 unsigned shorts\n pkt2 = struct.pack('<IIBBBBBBBBBBBBBBBBBBBBBBBBBBHHHHH', app_packet.GOPRO_V2_STATE, 36, \\\n GOPRO_V2_SPEC_VERSION,\n self.mgr.model,\n self.mgr.status,\n self.mgr.isRecording,\n self.mgr.captureMode,\n self.mgr.videoFormat,\n self.mgr.videoResolution,\n self.mgr.videoFrameRate,\n self.mgr.videoFieldOfView,\n self.mgr.videoLowLight,\n self.mgr.photoResolution,\n self.mgr.photoBurstRate,\n self.mgr.videoProtune,\n self.mgr.videoProtuneWhiteBalance,\n self.mgr.videoProtuneColor,\n self.mgr.videoProtuneGain,\n self.mgr.videoProtuneSharpness,\n self.mgr.videoProtuneExposure,\n self.mgr.enabled,\n 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0\n )\n\n self.mgr.sendState()\n call1 = call(pkt1)\n call2 = call(pkt2)\n self.mgr.shotMgr.appMgr.sendPacket.assert_has_calls([call1, call2])", "def detect():\n pass", "def start(self):\n self.activateLog()\n self.preLoad()\n\n self.running = True\n\n logging.debug('Start detection of {} in {}.'.format(self.Config[\"CLASSES\"], \n self.__class__.__name__))\n failedSend = 0\n while self.running:\n gdList = []\n _classes = None\n _idData = 0\n\n try:\n if not self.Standalone:\n gdList = 
self.bring(controller=self.Controller, device=self.Device, \n limit=self.Limit, lastTime=self.lastTime)\n self.lastTime = gdList[0]['timeQuery']\n else:\n gdList = self.testData()\n failedSend = 0 \n except:\n failedSend += 1\n logging.exception(\n 'Unexpected error getting data from pool: {}. Controller: {}, Device: {}, Limit: {}.'.format(\n self.URL, self.Controller, self.Device, self.Limit))\n if failedSend > 2 and not self.dp.isLive():\n logging.error('Pool no found {} will shutdown.'.format(self.__class__.__name__))\n self.stop()\n break\n continue\n\n for gd in gdList[1:]:\n _classes = []\n try:\n _classes, _aux = self.predict(gd)\n _idData = gd['id']\n except:\n logging.exception(\n 'Unexpected error in prediction from classifier: {} ({}).'.format(\n self.__class__.__name__, self.Config[\"MACHINE_NAME\"]))\n \n try:\n if not self.Standalone and len(_classes) > 0:\n self.sendDetection(_idData, _classes, _aux)\n else:\n self.showData(gd, _classes, _aux)\n failedSend = 0 \n except:\n failedSend += 1\n logging.exception(\n 'Unexpected error sending data from classifier: {} ({}).'.format(\n self.__class__.__name__, self.Config[\"MACHINE_NAME\"]))\n\n if failedSend > 2 and not self.dp.isLive():\n logging.error('Pool no found {} will shutdown.'.format(self.__class__.__name__))\n self.stop()\n break", "def camera_status():\n # Do command\n consoleOutput = exec_console_command(constants.cameraCheck)\n\n # Parse output for results\n status = False\n feedbackOutput = constants.cameraCheckOff\n\n if \"Nikon Corp.\" in consoleOutput:\n status = True\n feedbackOutput = constants.cameraCheckOn\n\n # Encode to JSON\n return feedbackOutput, status", "def detect_object():\n response = None\n try:\n # logger.info(request.Form)\n if request.files['base_image'] is not None:\n base_img = cv2.imdecode(np.fromstring(request.files['base_image'].read(), np.uint8), cv2.IMREAD_UNCHANGED)\n\n if base_img is not None:\n response = predictionService.verify(base_img=base_img)\n else:\n response = BaseResponse(code=400, reason='base_image cannot be null')\n except Exception as e:\n logger.error(e)\n response = BaseResponse(code=500, reason=\"Internal server error occurred. refer to logs\")\n\n return response.toJSON()", "def image_cb(self, msg):\n if self.waypoints is None:\n return\n if self.state_count >= self.state_count_threshold and time.time() - self.last_detection_time < self.traffic_light_detection_interval:\n return\n if time.time() - self.last_tl_off_time < self.traffic_light_off_idle_interval:\n if self.loglevel >= 5:\n rospy.logdebug(\"No detection %f %f %f\", time.time(), self.last_tl_off_time, self.traffic_light_off_idle_interval)\n return\n\n self.last_detection_time = time.time()\n self.has_image = True\n self.camera_image = msg\n light_wp, state = self.process_traffic_lights()\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n self.state_count = 1\n self.state = state\n else:\n self.state_count += 1\n if self.state_count >= self.state_count_threshold:\n if state == TrafficLight.GREEN and self.last_state in (TrafficLight.RED, TrafficLight.YELLOW):\n self.last_tl_off_time = time.time()\n self.last_state = self.state\n self.last_wp = light_wp\n self.last_msg = state_msg = TrafficLightStatus()\n state_msg.tlwpidx = light_wp\n state_msg.state = state\n self.upcoming_red_light_pub.publish(state_msg)\n elif self.last_msg: # have not reached the threshold\n if self.car_wpidx < self.last_msg.tlwpidx + self.traffic_light_over_waypoints: \n # keep sending previous message when we are still close to the current traffic light\n self.upcoming_red_light_pub.publish(self.last_msg)\n else: # for other locations, clear traffic light status\n self.last_msg.tlwpidx = -1\n self.last_msg.state = TrafficLight.UNKNOWN\n self.upcoming_red_light_pub.publish(self.last_msg)\n self.last_msg = None\n if self.loglevel >= 4:\n rospy.loginfo(\"Curr Light_wp: %d, state: %d, global state: %d, last Light_wp: %d, state count: %d\", light_wp, state, self.state, self.last_wp, self.state_count)", "def callback(self, data):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n self.image_sub.unregister()\n\n except CvBridgeError as e:\n rospy.logerr(e)\n (rows, cols, channels) = cv_image.shape\n #result = cv2.fastNlMeansDenoisingColored(cv_image, None, 20, 10, 7, 21)\n image = cv_image\n # Resize a 720x1280 image to 360x640 to fit it on the screen\n \"\"\"resized_image = cv2.resize(image, (720 / 2, 1280 / 2))\n cv2.imshow(\"/eyrc/vb/camera_1/image_raw\", resized_image)\n rospy.loginfo(self.get_qr_data(image))\"\"\"\n _,threshold = cv2.threshold(image, 70, 255, cv2.THRESH_TRUNC)\n self.get_qr_data(threshold)\n cv2.waitKey(3)", "def test_get_status(self):\n # Index of status bit to flip\n for app_num, servo_type in app_nr.items():\n self.cmd_num += 1\n # Retrieve the positions directly from the server (without ACS)\n command = headers[0] + commands[6] + ':%d=' %self.cmd_num + str(app_num) + closers[0]\n\n found = False\n while(not found):\n self.sockobj.sendall(command)\n data = \"\"\n while(True):\n data += self.sockobj.recv(1)\n if closers[0] in data:\n if ':%d=' %self.cmd_num in data:\n found = True\n break\n else:\n data = \"\"\n\n if data.startswith(\"!NAK\"):\n continue\n status_obj = self.__dict__[servo_type]._get_status()\n acs_status, completion = status_obj.get_sync()\n\n if(completion.code):\n print \"\\nError code found in status...\"\n continue\n try:\n # Retrieve the message header\n sent, answer = data.split(\">\")\n status = int(answer.strip())\n except:\n continue\n\n self.assertAlmostEqual(acs_status, status, places=1)", "def send_status(self):\n self.data = {\n 'value': '',\n 'state': self.state,\n }\n event_manager.device_changed(self)", "def detected(self) -> bool:\n\t\treturn self._raw_result['data']['detected']", "def yolo_detection(raw_image):\n class_ids = []\n confidences = []\n boxes = []\n height , width ,c= raw_image.shape\n blob = cv2.dnn.blobFromImage(raw_image, 0.00392, (416,416), (0,0,0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.4:\n center_x = int(detection[0]*width)\n center_y = int(detection[1]*height)\n w = int(detection[2]*width)\n h = 
int(detection[3]*height)\n ##Rectangle Draw\n topleft_x = int(center_x-(w/2))\n topleft_y = int(center_y-(h/2))\n\n boxes.append([topleft_x,topleft_y,w,h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n #DISPLAY DETECTION\n total_detections = len(boxes)\n for i in range(total_detections):\n if i in indexes:\n topleft_x, topleft_y, w,h = boxes[i]\n label = detection_classes[class_ids[i]]\n cv2.rectangle(raw_image, (topleft_x,topleft_y), (topleft_x+w,topleft_y+h), (0,100,255), 1)\n cv2.putText(raw_image, label, (topleft_x, topleft_y),cv2.FONT_HERSHEY_COMPLEX,1,(0,165,255))\n\n\n return raw_image", "def run(self):\n self.debug(__name__ + \".run(): self.threadName=\" + str(self.threadName) + \"\\n\")\n self.debug(__name__ + \".run(): self.statusFile=\" + str(self.statusFile) + \"\\n\")\n self.debug(__name__ + \".run(): self.recvData=\" + str(self.recvData) + \"\\n\")\n self.debug(__name__ + \".run(): self.socketConn=\" + str(self.socketConn) + \"\\n\")\n\n status = True\n data = self.getFileData()\n self.mySocketObj.serverSend(self.socketConn, data)\n if self.socketConn: self.socketConn.close()\n # self.updateCounts()\n self.status = status\n if status:\n self.appendMsg(__name__ + \".run(): Completed successfully for \" + str(self.threadName) + \"\\n\")\n else:\n self.appendMsg(__name__ + \".run(): Failed for \" + str(self.threadName) + \"\\n\")\n # Endif", "def run(self):\n while True:\n ret, frame = self.classification()\n # valid frame\n if ret == True:\n # output the recognized face\n if self.video_out != None:\n self.video_out.display(frame)\n if self.pic_out != None:\n self.pic_out.save_frame(frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n if self.video_out != None:\n cv2.destroyAllWindows()", "def deskcheck():\n try:\n user_id = authenticate_token(request)\n user = is_user(user_id)\n\n if user:\n # Checks if image file is received\n if request.files.get('image'):\n # Image is of type FileStorage, so it can be read directly\n image = request.files['image']\n files = [('image', image.read())]\n # Sends request to ODAPI\n r = requests.post(ODAPI_URL+'detections', files=files)\n if r.status_code == 200:\n # Return json of request to client\n data = r.json()\n return jsonify(data), 200\n raise Exception(\"Unsuccessful attempt to detect objects\")\n return jsonify({ 'message': 'No image sent' }), 400\n \n return jsonify({'user_id': user_id, 'message': \"access denied, invalid user.\" }), 403\n except (MaxRetryError, requests.ConnectionError, requests.ConnectTimeout) as e:\n return jsonify({ 'message': 'Could not connect to ODAPI.' 
}), 500\n except Exception as e:\n print(traceback.format_exc())\n return jsonify({ 'message': e.args }), 500", "async def detected(self, message, flags):\n # Get guild preferences from db.sqlite\n select_preferences_table = \"\"\"\n SELECT channel\n FROM preferences\n WHERE GuildID=?\"\"\"\n columns, prefs = self.bot.connection.execute_query(\n select_preferences_table, \"rr\",\n message.guild.id\n )\n preferences = dict(zip(columns, prefs[0]))\n # Get notification channel preferences from guild preferences\n if preferences is None:\n channel = message.channel\n else:\n channel = discord.utils.get(\n message.guild.channels,\n id=preferences[\"channel\"]\n )\n if channel is None:\n channel = message.channel\n # Send notifying embed to specified channel\n embed = discord.Embed(\n title=\"Ghost Ping Detected :no_entry_sign: :ghost:\",\n color=0x0000ff\n )\n fields = {\n \"Member\": message.author.name, \"Message\": message.content,\n \"Channel\": message.channel.name\n }\n fields = {**fields, **flags}\n for field in fields:\n embed.add_field(name=field, value=fields[field])\n embed.set_footer(\n text=f\"Detect At: {message.created_at.strftime('%D %T')}\")\n await channel.send(embed=embed)", "def classifier():\n\tprint(\"Classifying\")\n\t#initialize important variables\n\tminConfidence = 0.5\n\tthresholdValue = 0.3\n\t\n\t\"\"\"\n\tfile = request.files#['image']\n\tfile.save(\"./classifier_image.jpg\")\n\tframe = cv2.imread(\"./classifier_image.jpg\")\n\t\"\"\"\n\tfile = request.json\n\tframe = np.array(file[\"Frame\"], dtype = \"uint8\") \n\n\t#file = request.files['image']\n\t#file.save(\"./classifier_image.jpg\")\n\t#frame = cv2.imread(\"./classifier_image.jpg\")\n\t#file = request.json\n\t#frame = np.array(file[\"contour\"], dtype=\"uint8\")\n\t\n\t#Get Image dimensions\n\timage = cv2.copyMakeBorder(frame, 30, 30, 30, 30, cv2.BORDER_CONSTANT, value=255)\n\t(H, W) = image.shape[:2]\n\t\n\t#Get the output layers parameters\n\tln = net.getLayerNames()\n\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\t\n\t#Create a blob to do a forward pass\n\tblob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\tnet.setInput(blob)\n\t#print(H, W)\n\tlayerOutputs = net.forward(ln)\n\tprint(type(net))\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\tfor output in layerOutputs:\n\t\tprint(\"detecting\")\n\t\t#loop over each detection\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability) of\n\t\t\t# the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > minConfidence:\n\t\t\t\t# scale the bounding box coordinates back relative to the\n\t\t\t\t# size of the image, keeping in mind that YOLO actually\n\t\t\t\t# returns the center (x, y)-coordinates of the bounding\n\t\t\t\t# box followed by the boxes' width and height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top and\n\t\t\t\t# and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates, confidences,\n\t\t\t\t# and class IDs\n\t\t\t\tboxes.append([x, y, int(width), 
int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping bounding\n\t# boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, minConfidence, thresholdValue)\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\toutput = json.load(open(outputFile))\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\tprint(LABELS[classIDs[i]], output[LABELS[classIDs[i]]]+1, confidences[i])\n\t\t\toutput[LABELS[classIDs[i]]]+=1\n\t\t\n\t\tjson.dump(output, open(outputFile, \"w\"))\n\t\treturn LABELS[classIDs[i]]\n\telse:\n\t\treturn Response(status=200)", "def sendStatusKeys(self, cmd):\n\n cmd.inform('cameraName=%s; readNoise=%0.2f' % (self.name, self.readNoise))", "def sendStatusKeys(self, cmd):\n\n cmd.inform('cameraName=%s; readNoise=%0.2f' % (self.name, self.readNoise))", "def sendStatusKeys(self, cmd):\n\n cmd.inform('cameraName=%s; readNoise=%0.2f' % (self.name, self.readNoise))", "def ping():\n health = (\n ScoringService.get_predictor_model() is not None\n ) # You can insert a health check here\n\n status = 200 if health else 404\n return flask.Response(response=\"\\n\", status=status, mimetype=\"application/json\")", "def testSuccess(self):\n seq_num = 9\n request = struct.pack(HEADER_FMT, REQUEST_TYPE, seq_num)\n reply = self.sendAndReceive(request)\n reply_type, replied_seq_num = struct.unpack(HEADER_FMT,\n reply[0:HEADER_SIZE])\n self.assertEqual(REPLY_TYPE, reply_type)\n self.assertEqual(seq_num, replied_seq_num)\n metrics = json.loads(reply[HEADER_SIZE:])\n self.assertEqual([], metrics['Components'])", "async def get_status():", "def image_cb(self, msg):\n rospy.logdebug(\"TLDetector.image_cb\")\n self.__has_image = True\n self.__camera_image = msg\n\n cv_image = self.__bridge.imgmsg_to_cv2(msg, \"bgr8\")\n light_wp, state = self.__process_traffic_lights()\n if self.__mode == LABEL_MODE and not self.__classification_done and state != 4:\n self.__classification_done = self.__light_classifier.save_image(\n cv_image, state\n )\n if self.__classification_done:\n rospy.loginfo(\"TLDetector.image_cb: Done generating labels.\")\n\n \"\"\"\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n \"\"\"\n self.__publish_traffic_light_state(light_wp, state)", "def send_verification(self):\n pass", "def send_request(self, img_path):\n\n self.Helpers.logger.info(\"Sending request for: \" + img_path)\n \n _, img_encoded = cv2.imencode('.png', cv2.imread(img_path))\n response = requests.post(\n self.addr, data=img_encoded.tostring(), headers=self.headers)\n response = json.loads(response.text)\n \n return response", "def receive_image(self):\n code = self.socket.recv(1)\n self.verify_img_code(code)\n if code[0] == codes['timeout']:\n print(\"Ocurrió un timeout en la conexión\")\n self.close_connection()\n idpokemon = bytes_to_int(self.socket.recv(1))\n self.verify_pokemon(idpokemon)\n tam_image = bytes_to_int(self.socket.recv(4))\n f = open(\"../..\" + str(idpokemon) + \".png\", 'wb')\n l = 1\n while(l):\n l = self.socket.recv(1024)\n f.write(l)\n print(\"Se guardó una imagen del pokémon capturado en el archivo \" +\n str(idpokemon) + \".png.\")\n f.close()\n\n print(\"Sesión terminada.\")\n reply = self.socket.recv(1)\n self.close_connection()", "def fetch_status():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((GEARMAND_HOST, GEARMAND_PORT))\n log_verbose('Connected to Gearmand at %s:%s' % (GEARMAND_HOST, GEARMAND_PORT))\n except socket.error, e:\n collectd.error('gearmand_info plugin: Error connecting to %s:%d - %r'\n % (GEARMAND_HOST, GEARMAND_PORT, e))\n return None\n fp = s.makefile('r')\n log_verbose('Sending info command')\n s.sendall('status\\r\\n')\n\n status = {}\n while True:\n data = fp.readline().strip()\n log_verbose('Received data: %r' % data)\n if not data or data == '.':\n break\n function, total, running, available_workers = data.split('\\t')\n status[function] = {\n 'total': total,\n 'running': running,\n 'available_workers': available_workers}\n\n s.close()\n return status", "def tesseract_recognize(): # pylint: disable=too-many-return-statements\n try:\n data = request.json\n\n ### Check options ###\n if 'options' in data:\n if not isinstance(data['options'], list) or \\\n not all(isinstance(i, string_types) for i in data['options']):\n return resp400('expected options as an array of strings')\n else:\n data['options'] = []\n\n info_option = any([op == \"-h\" or op == \"--help\" or \\\n op == \"-v\" or op == \"--version\" \\\n for op in data['options']])\n\n if not info_option:\n # @todo Accept input xml and images from HTTP and output xml in response\n\n ### Check input file ###\n if 'input_file' not in data or \\\n not isinstance(data['input_file'], string_types):\n return resp400('expected input_file as a string')\n if not os.path.isfile(data['input_file']):\n return resp400('input file not found: '+data['input_file'])\n\n ### Check output file ###\n if 'output_file' not in data or \\\n not isinstance(data['output_file'], string_types):\n return resp400('expected output_file as a string')\n\n ### Generate command list with additional options if present ###\n cmd = ['/usr/local/bin/tesseract-recognize']\n cmd.extend(data['options'])\n if not info_option:\n cmd.extend([data['input_file'], data['output_file']])\n\n ### Execute tesseract-recognize command ###\n proc = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)\n cmd_out = str(proc.stdout.read())\n proc.communicate()\n cmd_rc = proc.returncode\n\n ### Response depending on the case ###\n msg = 'command='+(' '.join(cmd))+' output='+cmd_out\n if cmd_rc != 0:\n return resp400('execution failed: '+msg+' 
return_code='+str(cmd_rc))\n return resp200('execution successful: '+msg)\n\n ### Catch any problem and respond a accordingly ###\n except Exception as ex: # pylint: disable=broad-except\n return resp500(str(ex)+\"\\n\"+traceback.format_exc())", "def get_light_state(self):\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n #Get classification\n tl_image_rgb, color_index = self.light_classifier.get_classification(cv_image)\n tl_cv_image = cv2.cvtColor(tl_image_rgb, cv2.COLOR_RGB2BGR)\n try:\n self.tl_detected_image_pub.publish(self.bridge.cv2_to_imgmsg(tl_cv_image, \"bgr8\"))\n except CvBridgeError as e:\n print(e)", "def send(self, data: bytes) -> int:\n ...", "def update(self):\n # Get frame from video source:\n ret, frame = self.vid.read()\n\n if ret:\n # Convert the captured frame into grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.equalizeHist(gray)\n\n # Get all faces from the video frame\n faces = self.faceCascade.detectMultiScale(gray, 1.2,5)\n\n # For each face in faces\n for (x, y, w, h) in faces:\n # Create rectangle around the face\n cv2.rectangle(frame, (x-20,y-20), (x+w+20,y+h+20), (0,255,0), 4)\n\n # Recognize the face belongs to which ID\n Id = self.recognizer.predict(gray[y:y+h,x:x+w])\n\n ### IDENTIFICATION & SOCKET CODE GOES HERE\n if Id[0] == self.user_id:\n # If the target face is found 10 times then access is granted\n self.identification_count += 1\n if self.identification_count > 10:\n self.master.switch_frame(AccessGranted)\n\n name_to_put = self.user_name\n else:\n name_to_put = \"Unknown - Access Denied\"\n\n # Put text describe who is in the picture\n cv2.rectangle(frame, (x-22,y-90), (x+w+22, y-22), (0,255,0), -1)\n cv2.putText(frame, str(name_to_put), (x,y-40), self.font, 2, (255,255,255), 3)\n\n self.after(50, self.update)", "def detect_state(self, camera, image, send_q):\n print('Therefore, should never get to this print statement')\n pass", "def ping():\n return json_response({\n 'ping': 'pong',\n 'version': __version__,\n 'imgapi': False,\n })", "def run_detect(**kwargs):\n cmd = 'python yolov3/detect.py'\n pms_list = [\n 'image_folder', 'model_def', \n 'weights_path', 'class_path', \n 'conf_thres', 'nms_thres',\n 'batch_size', 'n_cpu', \n 'img_size', 'checkpoint_model'\n ]\n call_command(pms_list, cmd, kwargs)", "def get_response_from_cv_api(data):\n url = 'https://vision.googleapis.com/v1/images:annotate?key={}'.format(API_KEY)\n\n response = requests.post(url=url, data=data, headers={'Content-Type': 'application/json'})\n\n return response", "def detector(videoframe, facedetection, maskdetection):\n (h, w) = videoframe.shape[:2]\n blobimage = cv2.dnn.blobFromImage(videoframe, 1.0, (224, 224), (104.0, 177.0, 123.0))\n\n facedetection.setInput(blobimage)\n ffinding = facedetection.forward()\n\n face_list = []\n locations = []\n predictions = []\n\n for i in range(0, ffinding.shape[2]):\n credence = ffinding[0, 0, i, 2]\n if credence > 0.6:\n case = ffinding[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x_start, y_start, x_end, y_end) = case.astype(\"int\")\n (x_start, y_start) = (max(0, x_start), max(0, y_start))\n (x_end, y_end) = (min(w - 1, x_end), min(h - 1, y_end))\n\n image = videoframe[y_start:y_end, x_start:x_end]\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (224, 224))\n image = img_to_array(image)\n image = preprocess_input(image)\n face_list.append(image)\n locations.append((x_start, y_start, x_end, y_end))\n\n if len(face_list) > 0:\n face_list = np.array(face_list, 
dtype=\"float32\")\n predictions = maskdetection.predict(face_list, batch_size=32)\n return (locations, predictions)", "def send(self, data):", "def extract_detections(self):\n self.rescue_model.setInput(self.human_blob)\n self.predictions = self.rescue_model.forward()", "def detect(self, input_image):\n self.t.start()\n frame = self.convert_image(input_image)\n frame = cv2.pyrDown(frame)\n\n img, confidence, x, y = self.detector.detect(frame)\n print('Detection:', confidence, x, y)\n det = Target_coordinates()\n det.confidence = confidence\n det.x = x\n det.y = y\n self.pub_detection.publish(det)\n self.pub_fpv.publish(self.bridge.cv2_to_imgmsg(img))\n cv2.imwrite('frames/frame%d.jpg' % self.frame_num, img)\n self.frame_num += 1\n self.t.end()\n # Display\n cv2.imshow(self.iw, img)\n key = cv2.waitKey(30) & 0xFF\n if key == 27:\n cv2.destroyAllWindows()\n sys.exit(27)", "def post(self):\n try:\n msg = json.loads(self.request.body)\n command = msg[\"command\"]\n # start/stop data_worker\n if command == \"start\":\n message = self.start_data_worker()\n self.write({\"success\": True, \"message\": message})\n elif command == \"stop\":\n message = self.stop_data_worker()\n self.write({\"success\": True, \"message\": message})\n else:\n self.write({\"success\": False, \"message\": \"unknown command\"})\n except Exception:\n log.exception(\"Exception\")\n self.write({\"success\": False, \"message\": \"error during control\"})", "def classification(self):\n if self.video_in != None:\n ret, frame = self.video_in.get_a_frame()\n elif self.camera_in != None:\n ret, frame = self.camera_in.get_a_frame()\n if ret == True:\n # detect face\n faces = FaceModel.detect_face(self, frame)\n FaceModel.write_faces_to_file(self, frame, faces)\n status = FaceModel.face_embeddings(self, faces)\n if status == True:\n bounded_frame = self.face_prediction(frame, faces)\n # We are done with embedding and prediction.\n # We can delete the temp directory where we saved\n # the frame, so that the next frame with face\n # can be saved there\n shutil.rmtree(FaceModel.data_dir)\n os.makedirs(FaceModel.data_dir)\n return True, bounded_frame\n else:\n return True, frame\n else:\n return False, None", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def visualize_detection(self, image):\n\t\tH, W, _ = image.shape\n\t\tpos_list = self.apply_detection(image)\n\t\tdetections = {}\n\t\thasDetection = False\n\t\tfor i, L in enumerate(pos_list):\n\t\t\ttext, coordinates = L[0], L[1]\n\t\t\tCOLOR = COLORS[text]\n\t\t\tfor x, y, w, h in coordinates:\n\t\t\t\t# prune bad homography points\n\t\t\t\tif x < 0 or y < 0 or x + w > W or \\\n\t\t\t\t y + h > H or w <= 1 or h <= 1:\n\t\t\t\t\tcontinue\n\t\t\t\t# add the detection to the dict for tracking\n\t\t\t\tdetections[self.num_detect] = (x, y, w, h)\n\t\t\t\tself.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text)\n\t\t\t\tself.num_detect += 1\n\t\t\t\thasDetection = True\n\t\t\t\t# if the detection is human\n\t\t\t\tif text == 'face':\n\t\t\t\t\tgender = self.genderDetect.classify(image[y:y+h, x:x+w, :])\n\t\t\t\t\tgender = 'female' if gender[0] < 0.5 else 'male'\n\t\t\t\t\tcv2.putText(image, gender, (x + w // 2 -10, y + h + 15),\n\t\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)\n\n\t\t\t\timage = cv2.rectangle(image, (x, y), (x + w, y + h), COLOR, 2)\n\t\t\t\tcv2.putText(image, text, (x, y - 5),\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, 
COLOR, 2, cv2.LINE_AA)\n\t\tif hasDetection:\n\t\t\tself.detection_frames[self.num_save] = detections\n\t\tself.num_save +=1\n\t\treturn image", "def run(self):\n self.status = \"Processing\"\n start_total_time = time.time()\n while self.is_running:\n if self.video_queue.is_running:\n if self.video_queue.empty():\n if self.video_queue.thread.is_running:\n time.sleep(0.005)\n self.logger.log(0, 'VIDEO QUEUE EMPTY')\n else:\n self.finalize()\n else:\n try:\n if self.video_queue:\n img = self.video_queue.get()\n if type(img) is np.ndarray:\n start_time = time.time()\n self.logger.log(0, \"TIME AFTER CURRENT_TIME {}\".format( time.time()-start_time ))\n gender_video_predict = self.face_gender_detector.detect_genders_from_img(img)\n if gender_video_predict:\n self.logger.log(0, \"FACES DETECTED. TIME {}\".format( time.time()-start_time ))\n final_gender = gender_video_predict[0][\"gender\"]\n dict_detection = OrderedDict(\n [('frame', self.actual_frame_number),\n ('gender', final_gender)])\n self.results.append(dict_detection)\n self.actual_frame_number += 1\n self.logger.log(0, \"TIME AFTER dict_detection {}\".format( time.time()-start_time ))\n self.logger.log(0, \"TIME AFTER write_results {}\".format( time.time()-start_time ))\n self.progress = self.update_progress()\n self.logger.log(0, \"TIME AFTER update_progress {}\".format( time.time()-start_time ))\n total_time = time.time() - start_total_time\n self.logger.log(\n 10, \"PROGRESS: {}; TIME ELAPSED: {}; E.T.A: {}\".format(\n self.progress, \n timedelta(seconds=int(total_time)),\n timedelta(\n seconds=int(total_time*100/self.progress) - int(total_time))))\n except:\n self.status = \"Failed\"\n self.logger.error(\n 'Unexpected error : {}'.format(\n traceback.format_exc()))\n self.finalize()\n break\n else:\n self.logger.info('Queue has stopped')\n self.finalize()\n break\n self.status = \"Completed\"\n self.logger.info(f\"Analysis of video {self.video_queue.path} has been completed\")\n save_results(self.results, \"/home/visiona2/code/gender_equality_api/src/gender_equality/\")", "def send(self, data):\n return False", "def process_hub_reply(self, hub_reply):\n\n # Typical response from hub is \"OK\" if there are no user or\n # automated librian requests. Almost all responses are just \"OK\"\n # therefore the default process_hub_reply is \"pass\"\n # TODO Respond to hub repies if they are other than 'OK'\n # for example, push \"send 10 frames\" request onto deque\n # and then add \"do requested extra frames\" to detectors loop\n # so that images get sent even though there is no routine reason\n pass", "def analyze():\n body = request.get_json()\n if (body == None):\n data = {\n \"message\": \"malformed json body\"\n }\n response = app.response_class(\n response=json.dumps(data),\n status=400,\n mimetype='application/json'\n )\n\n \"\"\"\n # Sample code\n # `min_poi` means the tolerance in matching strategy, a hyper-parameter\n # high `min_poi` will lead to strict matching strategy, causing possible failure in reg proc.\n # user_model includes (reg_ratio, reg_kp, reg_feat)\n # you can debug yourself to see the shape. 
They are all ND-array in numpy format\n # The easiest way to storage user model is to dump them locally and using hash to name them.(np.load/save)\n user_model, status, status_info = authModule.register(image_list, min_poi=6)\n # Then save the user model to the database.\n \"\"\"\n return response\n\n handwriting_content = body[\"handwriting\"]\n logging.info(\"handwriting received is {}\", handwriting_content)\n cvImg = img_converter.readb64(handwriting_content)\n\n logging.info(\"getting the information {}\", cvImg)\n image_list = [cvImg]\n user_model, status, status_info = authModule.register(image_list, min_poi=6)\n \n resp = {\n \"user_model\": trsltlyr.serialize(user_model)\n }\n return jsonify(resp), 200", "def handle(self):\n global latest_status\n data = self.request[0]\n socket = self.request[1]\n logging.info(\"Received {} bytes from {}\".format(len(data), self.client_address[0]))\n jss = interface.joystick_status_pb2.JoystickStatus()\n jss.ParseFromString(data)\n sent = jss.sent.ToDatetime()\n if not latest_status:\n latest_status = jss\n else:\n if latest_status.sent.ToDatetime() < sent:\n latest_status = jss\n else:\n logging.warning(\"Discarded stray package.\")\n ack = interface.joystick_status_pb2.JoystickAck()\n ack.sent.CopyFrom(jss.sent)\n ack.received.GetCurrentTime()\n response = ack.SerializeToString()\n socket.sendto(response, self.client_address)", "def status(self, code, content_length=None):", "def send_photo(self,filename):\n device = Device()\n mc = device.get_MAC_address()\n mac= f\"{mc}\"\n #headers = {'content-type': 'application/json'}\n files = {\"media\" : open(filename,'rb')}\n content = {\"MAC_address\":mac}\n print(mac)\n r = requests.post(f\"http://{self.webserver_address}api/user/notify\", data = content, files = files) \n msg = r.text\n print(msg)\n if(r.status_code == 200):\n return True\n return False", "def onPing(self, payload):", "def _r_send_result(self, response, protocol):\n #print(\"Send result: %s\" % result)\n protocol.send_message(response)", "def process_image(self):\n\n detect.main(self.nn_args)", "def detection_cam(network_path, xml_path):\n\n files = os.listdir(network_path)\n\n networks = [load_network(network_path + files[k]) for k in range(len(files))]\n\n cap = cv2.VideoCapture(0)\n\n known_images = load_vector_database(\"P:/coding_weeks/machine_learning/repo/database/training_database.vdb\")\n\n known_labels = []\n\n for label in known_images:\n known_labels.append(label)\n\n while True:\n # Capture image par image\n ret, frame = cap.read()\n\n rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n boxes, faces = face_detection(rgb, xml_path)\n\n names = []\n\n for face in faces:\n face = cv2.resize(face, (128, 128))\n face = cv2.cvtColor(face, cv2.COLOR_RGB2GRAY)\n vector_list = hog(face, orientations=8, pixels_per_cell=(8, 8), cells_per_block=(1, 1))\n\n vector = numpy.zeros((len(vector_list), 1))\n\n for k in range(len(vector_list)):\n vector[k, 0] = vector_list[k]\n\n # guess = network.forward_propagation(vector)\n #\n # max_index = 0\n # max_value = guess[0, 0]\n #\n # for k in range(len(known_labels)):\n # if guess[k, 0] > max_value:\n # max_index = k\n # max_value = guess[k, 0]\n #\n # if max_value < 0.3:\n # names.append(\"UNKNOWN\" + str(max_value))\n #\n # else:\n # names.append(known_labels[max_index] + str(max_value))\n #\n # print(\"GUESS {} | TRUSTED {}\".format(known_labels[max_index], str(100.0 * max_value)[:5]))\n\n labels = []\n\n for network in networks:\n guess = network.forward_propagation(vector)\n\n max_index = 
0\n max_value = guess[0, 0]\n\n for k in range(len(known_labels)):\n if guess[k, 0] > max_value:\n max_index = k\n max_value = guess[k, 0]\n\n labels.append(known_labels[max_index])\n\n labels.sort()\n\n d = {}\n\n for label in labels:\n if label not in d:\n d[label] = 1\n else:\n d[label] += 1\n\n max = 0\n label = \"\"\n\n for l in d:\n if d[l] > max:\n max = d[l]\n label = l\n\n if max >= 0.8 * len(files):\n names.append(label)\n else:\n names.append(\"UNKNOWN\")\n\n for ((x_beginning, y_beginning, face_width, face_height), name) in zip(boxes, names):\n cv2.rectangle(frame, (x_beginning, y_beginning), (x_beginning + face_width, y_beginning + face_height), (0, 255, 0), 2)\n\n cv2.putText(frame, name, (x_beginning, y_beginning), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)\n\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()", "def _get_detections(args, generator, model, score_threshold=0.05, max_detections=100, save_path=None):\n all_detections = [[None for i in range(generator.num_classes()) if generator.has_label(i)] for j in range(generator.size())]\n\n detection_out = np.zeros([generator.size(),512,512,3])\n # detection_out = np.zeros([generator.size(),512,512])\n attention_out = np.zeros([generator.size(),512,512])\n mask_out = np.zeros([generator.size(),512,512])\n\n for i in tqdm(range(generator.size()), desc='Running network: '):\n raw_image = generator.load_image(i)\n # image = np.expand_dims(raw_image.copy(), axis=-1)\n # image = np.repeat(image, 3, axis=-1)\n # image = generator.preprocess_image(image)\n image = generator.preprocess_image(raw_image.copy())\n image, scale = generator.resize_image(image)\n\n if keras.backend.image_data_format() == 'channels_first':\n image = image.transpose((2, 0, 1))\n\n # run network\n # boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3]\n boxes, scores, labels, masks, attention_map = model.predict_on_batch(np.expand_dims(image, axis=0))\n # print('scores:', scores.shape)\n # print('labels',labels.shape)\n\n # correct boxes for image scale\n boxes /= scale\n\n # select indices which have a score above the threshold\n indices = np.where(scores[0, :] > score_threshold)[0]\n # print('indices', indices)\n scores = scores.numpy()\n boxes = boxes.numpy()\n labels = labels.numpy()\n masks = masks.numpy()\n attention_map = attention_map.numpy()\n # select those scores\n scores = scores[0][indices]\n\n # find the order with which to sort the scores\n scores_sort = np.argsort(-scores)[:max_detections]\n # print(scores_sort)\n\n # select detections\n image_boxes = boxes[0, indices[scores_sort], :]\n image_scores = scores[scores_sort]\n image_labels = labels[0, indices[scores_sort]]\n image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)\n\n if save_path is not None:\n draw_annotations(raw_image, generator.load_annotations(i), label_to_name=generator.label_to_name)\n draw_detections(raw_image, image_boxes, image_scores, image_labels, score_threshold=args.detection_threshold, label_to_name=generator.label_to_name)\n\n\n detection_out[i, :, :] = raw_image\n\n attention_map[np.where(attention_map < args.attention_threshold)] = 0\n # attention_out[i, :, :] = cv2.flip( cv2.resize(np.squeeze(np.uint8(attention_map * 255)), (origin_shape[1], origin_shape[0])), 0)\n attention_out[i, :, :] = cv2.resize(np.squeeze(np.uint8(attention_map * 255)), (512, 512))\n\n masks[masks < 
args.segmentation_threshold] = 0\n masks = cv2.resize(np.squeeze(np.uint8(masks * 255)), (512, 512))\n\n mask_out[i, :, :] = masks\n\n # copy detections to all_detections\n for label in range(generator.num_classes()):\n if not generator.has_label(label):\n continue\n\n all_detections[i][label] = image_detections[image_detections[:, -1] == label, :-1]\n if save_path is not None:\n detection_out = sitk.GetImageFromArray(detection_out)\n sitk.WriteImage(detection_out, os.path.join(save_path, 'detection_result.nii.gz'))\n\n attention_out = sitk.GetImageFromArray(attention_out)\n sitk.WriteImage(attention_out, os.path.join(save_path, 'attention_result.nii.gz'))\n\n mask_out = sitk.GetImageFromArray(mask_out)\n sitk.WriteImage(mask_out, os.path.join(save_path, 'masks_result.nii.gz'))\n\n return all_detections", "def send_image_frame_REP_watcher(self, text, image):\n\n self.REQ_sent_time.append(datetime.utcnow()) # utcnow 2x faster than now\n try:\n hub_reply = self.sender.send_image(text, image)\n except: # add more specific exception, e.g. ZMQError, after testing\n print(\"Exception at sender.send_image in REP_watcher function.\")\n self. fix_comm_link()\n self.REP_recd_time.append(datetime.utcnow())\n return hub_reply", "def test_send_network(self) :\n symbol = 'A' \n oProtocol = Protocol(symbol,mode=\"client\",debug=self.debug)\n command = \"N200\"\n message = oProtocol.send(command)\n #if message['status'] is False :\n #print(\"\\n*** ERROR : test_send_network : {}\".format(message['notify']))\n\n #Pour enregistrer les traces d'appels de fonctions dans le fichier log/client_calltrack_sorted.txt\n client_tracker_print()\n self.assertTrue( (message['status'] is not True) )", "def _handle_image(self, image_msg):\n # converting the ROS image message to CV2-image\n image = self._cv_bridge.imgmsg_to_cv2(image_msg, 'bgr8')\n\n # Skip if image is None\n if image is None:\n rospy.logdebug(\"Image content is None :(\", logger_name=\"vision\")\n return\n\n # Check if its the first image callback\n if self._first_image_callback:\n # Check if a cap may be on the camera\n self._handle_forgotten_camera_cap(image)\n\n # Instances that should be notified with the new image\n internal_image_subscribers =[\n self._field_color_detector,\n self._white_color_detector,\n self._red_color_detector,\n self._blue_color_detector,\n self._unknown_obstacle_detector,\n self._field_boundary_detector,\n self._obstacle_detector,\n self._red_obstacle_detector,\n self._blue_obstacle_detector,\n self._goalpost_detector,\n self._line_detector,\n self._ball_detector,\n self._debug_image_creator,\n ]\n\n # Distribute the image to the detectors\n # Iterate over subscribers\n for vision_object in internal_image_subscribers:\n # Send image\n vision_object.set_image(image)\n\n # Check if the vision should run the conventional and neural net part parallel\n if self._config['vision_parallelize']:\n # Create and start threads for conventional calculation and neural net\n #fcnn_thread = Thread(target=self._ball_detector.compute)\n\n conventional_thread = Thread(target=self._conventional_precalculation())\n\n conventional_thread.start()\n #fcnn_thread.start()\n\n # Wait for both threads\n conventional_thread.join()\n #fcnn_thread.join()\n else:\n # Calc conventional calculation and neural net\n self._ball_detector.compute()\n self._conventional_precalculation()\n\n ########\n # Ball #\n ########\n\n # Get a number of top balls under the field boundary, which have an high enough rating\n all_balls = 
self._ball_detector.get_top_candidates(count=self._max_balls)\n balls_under_field_boundary = \\\n self._field_boundary_detector.candidates_under_convex_field_boundary(\n all_balls,\n self._ball_candidate_y_offset)\n top_balls = candidate.Candidate.rating_threshold(\n balls_under_field_boundary,\n self._ball_candidate_threshold)\n # check whether there are ball candidates\n if top_balls:\n # Convert ball cancidate list to ball message list\n list_of_balls = map(ros_utils.build_ball_msg, top_balls)\n # Create balls msg with the list of balls\n balls_msg = ros_utils.build_balls_msg(image_msg.header, list_of_balls)\n # Publish balls\n self._pub_balls.publish(balls_msg)\n\n # Debug draw all ball candidates\n self._debug_image_creator.draw_ball_candidates(\n all_balls,\n (0, 0, 255))\n # Debug draw possible ball candidates under the field boundary\n self._debug_image_creator.draw_ball_candidates(\n balls_under_field_boundary,\n (0, 255, 255))\n # Debug draw top ball candidate\n self._debug_image_creator.draw_ball_candidates(\n top_balls,\n (0, 255, 0),\n thickness=2)\n\n #############\n # Obstacles #\n #############\n\n # Init list for obstacle msgs\n list_of_obstacle_msgs = []\n # Add red obstacles\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.ROBOT_MAGENTA,\n self._red_obstacle_detector.get_candidates()))\n # Add blue obstacles\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.ROBOT_CYAN,\n self._blue_obstacle_detector.get_candidates()))\n # Add UFO's (Undefined Found Obstacles)\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.UNDEFINED,\n self._unknown_obstacle_detector.get_candidates()))\n # Build obstacles msgs containing all obstacles\n obstacles_msg = ros_utils.build_obstacle_array_msg(image_msg.header, list_of_obstacle_msgs)\n # Publish obstacles\n self._pub_obstacle.publish(obstacles_msg)\n\n # Debug draw unknown obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._unknown_obstacle_detector.get_candidates(),\n (0, 0, 0),\n thickness=3)\n # Debug draw red obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._red_obstacle_detector.get_candidates(),\n (0, 0, 255),\n thickness=3)\n # Debug draw blue obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._blue_obstacle_detector.get_candidates(),\n (255, 0, 0),\n thickness=3)\n\n ########\n # Goal #\n ########\n\n # Get all goalposts under field boundary\n goal_posts = self._field_boundary_detector.candidates_under_convex_field_boundary(\n self._goalpost_detector.get_candidates(),\n self._goal_post_field_boundary_y_offset)\n\n # Get goalpost msgs and add them to the detected goal posts list\n goal_post_msgs = ros_utils.build_goal_post_msgs(goal_posts)\n # Create goalposts msg\n goal_posts_msg = ros_utils.build_goal_post_array_msg(image_msg.header, goal_post_msgs)\n # Check if there is a goal\n if goal_posts_msg:\n # If we have a goal, lets publish it\n self._pub_goal_posts.publish(goal_posts_msg)\n\n # Debug draw all goal posts\n self._debug_image_creator.draw_obstacle_candidates(\n self._goalpost_detector.get_candidates(),\n (180, 180, 180),\n thickness=3)\n # Debug draw goal posts which start in the field\n self._debug_image_creator.draw_obstacle_candidates(\n goal_posts,\n (255, 255, 255),\n thickness=3)\n\n #########\n # Lines #\n #########\n if self._use_line_points:\n # Build a LineSegmentInImage message for each linepoint\n line_points = self._line_detector.get_linepoints()\n # Create line 
segments\n line_segments = ros_utils.convert_line_points_to_line_segment_msgs(line_points)\n # Create line msg\n line_msg = ros_utils.build_line_information_in_image_msg(image_msg.header, line_segments)\n # Publish lines\n self._pub_lines.publish(line_msg)\n\n # Draw debug line points\n self._debug_image_creator.draw_points(\n line_points,\n (0, 0, 255))\n\n if self._use_line_mask:\n # Define detections (Balls, Goal Posts) that are excluded from the line mask\n excluded_objects = top_balls + goal_posts\n # Get line pixel mask\n line_mask = self._line_detector.get_line_mask_without_other_objects(excluded_objects)\n # Create line mask message\n line_mask_message = ros_utils.build_image_msg(image_msg.header, line_mask, '8UC1')\n # Publish line mask\n self._pub_line_mask.publish(line_mask_message)\n\n # Draw debug line mask\n self._debug_image_creator.draw_mask(\n line_mask,\n color=(255, 0, 0),\n opacity=0.8)\n\n ##################\n # Field boundary #\n ##################\n\n # Get field boundary msg\n convex_field_boundary = self._field_boundary_detector.get_convex_field_boundary_points()\n # Build ros message\n convex_field_boundary_msg = ros_utils.build_field_boundary_polygon_msg(image_msg.header, convex_field_boundary)\n # Publish field boundary\n self._pub_convex_field_boundary.publish(convex_field_boundary_msg)\n\n # Debug draw convex field boundary\n self._debug_image_creator.draw_field_boundary(\n convex_field_boundary,\n (0, 255, 255))\n # Debug draw field boundary\n self._debug_image_creator.draw_field_boundary(\n self._field_boundary_detector.get_field_boundary_points(),\n (0, 0, 255))\n\n #########\n # Debug #\n #########\n '''\n if self._config['neural_network_type'] == 'fcnn':\n # Publish fcnn output for the region of interest under the field boundary (for the world model)\n if self._ball_fcnn_publish_output:\n roi_msg = ros_utils.build_fcnn_region_of_interest(\n self._ball_detector.get_fcnn_output(),\n self._field_boundary_detector,\n image_msg.header,\n self._config['ball_fcnn_publish_field_boundary_offset'])\n self._pub_ball_fcnn.publish(roi_msg)\n\n # Publish whole fcnn output for debug purposes\n if self._publish_fcnn_debug_image:\n self._pub_debug_fcnn_image.publish(self._ball_detector.get_debug_image())\n '''\n # Check, if HSV mask images should be published\n if self._publish_HSV_mask_image:\n # Mask images\n white_mask = self._white_color_detector.get_mask_image()\n red_mask = self._red_color_detector.get_mask_image()\n blue_mask = self._blue_color_detector.get_mask_image()\n\n # Publish mask images\n self._pub_white_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, white_mask, '8UC1'))\n self._pub_red_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, red_mask, '8UC1'))\n self._pub_blue_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, blue_mask, '8UC1'))\n\n # Check, if field mask image should be published\n if self._publish_field_mask_image:\n if isinstance(self._field_color_detector, color.DynamicPixelListColorDetector):\n # Mask image\n dyn_field_mask = self._field_color_detector.get_mask_image()\n static_field_mask = self._field_color_detector.get_static_mask_image()\n # Publish mask image\n self._pub_dynamic_color_lookup_table_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, dyn_field_mask, '8UC1'))\n self._pub_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, static_field_mask, '8UC1'))\n else:\n # Mask image\n field_mask = self._field_color_detector.get_mask_image()\n # 
Publish mask image\n self._pub_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, field_mask, '8UC1'))\n\n # Check if we should draw debug image\n if self._debug_image_creator.active:\n # publish debug image\n self._pub_debug_image.publish(\n ros_utils.build_image_msg(\n image_msg.header,\n self._debug_image_creator.get_image(),\n 'bgr8'))", "def detect(self, image, dconfig={}, detector_name=\"default\", id=\"\"):\n kwargs = {\"timeout\": self.timeout}\n if self.auth_key:\n kwargs['headers'] = {'doods-auth-key': self.auth_key}\n response = requests.post(\n self.url + \"/detect\", json={\"data\": PyDOODS.encode_image(image), \"detector_name\": detector_name, \"detect\": dconfig, \"id\": id}, **kwargs)\n response.raise_for_status()\n return response.json()", "def time(request):\n try:\n parameters_dict = {}\n if request.method == \"POST\":\n parameters_dict = parse_post_request(request)\n ner_logger.debug('Start Bulk Detection: %s ' % parameters_dict[PARAMETER_ENTITY_NAME])\n elif request.method == \"GET\":\n parameters_dict = get_parameters_dictionary(request)\n ner_logger.debug('Start: %s ' % parameters_dict[PARAMETER_ENTITY_NAME])\n\n timezone = parameters_dict[PARAMETER_TIMEZONE] or None\n form_check = True if parameters_dict[PARAMETER_STRUCTURED_VALUE] else False\n range_enabled = True if parameters_dict[PARAMETER_RANGE_ENABLED] else False\n time_detection = TimeDetector(entity_name=parameters_dict[PARAMETER_ENTITY_NAME],\n language=parameters_dict[PARAMETER_SOURCE_LANGUAGE],\n timezone=timezone)\n\n time_detection.set_bot_message(bot_message=parameters_dict[PARAMETER_BOT_MESSAGE])\n\n message = parameters_dict[PARAMETER_MESSAGE]\n entity_output = None\n\n if isinstance(message, six.string_types):\n entity_output = time_detection.detect(message=message,\n structured_value=parameters_dict[PARAMETER_STRUCTURED_VALUE],\n fallback_value=parameters_dict[PARAMETER_FALLBACK_VALUE],\n form_check=form_check,\n range_enabled=range_enabled)\n elif isinstance(message, (list, tuple)):\n entity_output = time_detection.detect_bulk(messages=message)\n\n ner_logger.debug('Finished %s : %s ' % (parameters_dict[PARAMETER_ENTITY_NAME], entity_output))\n except TypeError as e:\n ner_logger.exception('Exception for time: %s ' % e)\n return HttpResponse(status=500)\n\n return HttpResponse(json.dumps({'data': entity_output}), content_type='application/json')", "def get_detections(self, image):\n self.img = jetson.utils.cudaFromNumpy(image)\n self.width = image.shape[1]\n self.height = image.shape[0]\n detections = self._net.Detect(self.img, self.width, self.height)\n print(\"The inference is happening at \" + str(self._net.GetNetworkFPS()) + \" FPS\")\n return detections, jetson.utils.cudaToNumpy(self.img)", "def send_data(self):\n self.socket.send(\"DATA\\r\\n\")\n response = self.get_response()\n if response[0] != 354:\n print \"An error has occured try again\"\n print response[1]\n sys.exit(0)", "def number(request):\n try:\n parameters_dict = {}\n if request.method == \"POST\":\n parameters_dict = parse_post_request(request)\n ner_logger.debug('Start Bulk Detection: %s ' % parameters_dict[PARAMETER_ENTITY_NAME])\n elif request.method == \"GET\":\n parameters_dict = get_parameters_dictionary(request)\n ner_logger.debug('Start: %s ' % parameters_dict[PARAMETER_ENTITY_NAME])\n\n number_detection = NumberDetector(entity_name=parameters_dict[PARAMETER_ENTITY_NAME],\n language=parameters_dict[PARAMETER_SOURCE_LANGUAGE],\n unit_type=parameters_dict[PARAMETER_NUMBER_UNIT_TYPE])\n\n if 
parameters_dict[PARAMETER_MIN_DIGITS] and parameters_dict[PARAMETER_MAX_DIGITS]:\n min_digit = int(parameters_dict[PARAMETER_MIN_DIGITS])\n max_digit = int(parameters_dict[PARAMETER_MAX_DIGITS])\n number_detection.set_min_max_digits(min_digit=min_digit, max_digit=max_digit)\n\n message = parameters_dict[PARAMETER_MESSAGE]\n entity_output = None\n\n if isinstance(message, six.string_types):\n entity_output = number_detection.detect(message=message,\n structured_value=parameters_dict[PARAMETER_STRUCTURED_VALUE],\n fallback_value=parameters_dict[PARAMETER_FALLBACK_VALUE],\n bot_message=parameters_dict[PARAMETER_BOT_MESSAGE])\n elif isinstance(message, (list, tuple)):\n entity_output = number_detection.detect_bulk(messages=message)\n\n ner_logger.debug('Finished %s : %s ' % (parameters_dict[PARAMETER_ENTITY_NAME], entity_output))\n except TypeError as e:\n ner_logger.exception('Exception for numeric: %s ' % e)\n return HttpResponse(status=500)\n\n return HttpResponse(json.dumps({'data': entity_output}), content_type='application/json')", "def vis_detections(im, class_name, dets, thresh=0.8):\n global num\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n frame = im\n im = im[:, :, (2, 1, 0)]\n #fig, ax = plt.subplots(figsize=(12, 12))\n #ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 255), 2)\n print(num)\n cv2.imwrite('./'+str(num)+\".jpg\", frame)", "def _handleRequestCableCheckStatus(self, data):\r\n print(\"\\\"Request Cable Check Status\\\" received\")\r\n self.whitebeet.v2gParseRequestCableCheckStatus(data)\r\n try:\r\n self.whitebeet.v2gSetDcCableCheckStatus(True)\r\n except Warning as e:\r\n print(\"Warning: {}\".format(e))\r\n except ConnectionError as e:\r\n print(\"ConnectionError: {}\".format(e))", "def test_check_status(mock_send_message):\n A1sim.check_status(BASE_URL)\n mock_send_message.assert_called_once_with('GET',\n 'Get ric status',\n (f\"{BASE_URL}\"))", "def vr_http_classify(self, img):\n\n img = cv2.resize(img, (self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"], \n self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"]))\n img = self.reshape(img)\n \n return self.get_predictions(img)", "def request():\n return face_client.face.detect_with_stream(image=open(\"frame.png\", 'rb'),\n return_face_attributes=[emotion_attribute],\n recognition_model='recognition_02')", "async def run(self):\n\n result = {'start_timestamp': time()}\n\n ping = await create_subprocess_exec(\"/bin/ping\",\n self.device,\n \"-c \" + self.count,\n \"-l \" + self.preload,\n \"-W \" + self.timeout,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout = await ping.stdout.read()\n stderr = await ping.stderr.read()\n\n if stderr:\n result['error'] = stderr.decode('utf-8').strip()\n else:\n lines = stdout.splitlines()\n second_last_line = lines[len(lines)-2].decode('utf-8').split()\n last_line = lines[len(lines)-1].decode('utf-8')\n if not last_line:\n # if the last line is empty\n # none of the packets arrived\n result['error'] = 'Host unreachable'\n result['packets_sent'] = second_last_line[0]\n result['packets_recv'] = second_last_line[3]\n else:\n last_line = last_line.split()[3].split('/')\n result['min'] = last_line[0]\n result['avg'] = last_line[1]\n result['max'] = last_line[2]\n result['mdev'] = last_line[3]\n result['packets_sent'] = second_last_line[0]\n result['packets_recv'] = second_last_line[3]\n\n result['end_timestamp'] = time()\n 
self.results.append(result)\n return result", "def _send(self, service_check):\n raise NotImplementedError()", "def get_data(self):\n global CAM\n count = 0\n while CAM.isOpened():\n count += 1\n print('COUNT' + str(count))\n _, frame = CAM.read()\n\n # cropped face\n cropped_face, bbox_coordinate, anchor_coordinate = detect_faces(frame)\n if cropped_face is None:\n print(\"NONE FACE DETECTED\")\n sleep(1)\n continue\n\n # get fake face\n fake_face, profile_feature_vector = generate_frontal_face(cropped_face)\n\n cropped_face = cv2.cvtColor(cropped_face, cv2.COLOR_BGR2RGB)\n fake_face = cv2.cvtColor(fake_face, cv2.COLOR_BGR2RGB)\n\n # face matching\n face_matcher = FaceMatcher()\n matched_face, matched_name, matched_front_fake_face, matched_diff = \\\n face_matcher.match(cropped_face, fake_face, profile_feature_vector)\n\n matched_face = cv2.cvtColor(matched_face, cv2.COLOR_BGR2RGB)\n matched_front_fake_face = cv2.cvtColor(matched_front_fake_face, cv2.COLOR_BGR2RGB)\n\n _, cropped_face_jpeg = cv2.imencode('.jpg', cropped_face)\n _, fake_face_jpeg = cv2.imencode('.jpg', fake_face)\n _, matched_face_jpeg = cv2.imencode('.jpg', matched_face)\n _, matched_front_fake_face_jpeg = cv2.imencode('.jpg', matched_front_fake_face)\n\n encoded_cropped_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(cropped_face_jpeg.tobytes()).decode())\n encoded_fake_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(fake_face_jpeg.tobytes()).decode())\n\n encoded_matched_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(matched_face_jpeg.tobytes()).decode())\n encoded_matched_front_fake_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(matched_front_fake_face_jpeg.tobytes()).decode())\n\n # get detection model return here and send to face frontalization model\n SIO.emit('detection', {'cropped_face': encoded_cropped_face,\n 'fake_face': encoded_fake_face,\n 'matched_face': encoded_matched_face,\n 'matched_name': matched_name,\n 'matched_front_fake_face': encoded_matched_front_fake_face,\n 'id': uuid.uuid4().hex},\n namespace='/detections')\n sleep(self.delay)", "def image_handler(self, bot, update):\n text = update.message.text\n if text.startswith('/recon'):\n bot.sendMessage(chat_id=update.message.chat_id, text='*Object recognition*\\nSend me an image',\n parse_mode=ParseMode.MARKDOWN)\n return 10\n elif text.startswith('/faces'):\n bot.sendMessage(chat_id=update.message.chat_id, text='*Face recognition*\\nSend me an image',\n parse_mode=ParseMode.MARKDOWN)\n return 11", "def get_classification(self, image):\n if self.correct_gamma:\n if self.gamma == 1.0:\n self.gamma = 0.6\n elif self.gamma == 0.6:\n self.gamma = 1.0\n image = self.adjust_gamma(image, self.gamma)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_np = np.asarray(image, dtype=\"uint8\")\n image_np_expanded = np.expand_dims(image_np, axis=0)\n\n detected = False\n\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n boxes = np.squeeze(boxes)\n classes = np.squeeze(classes).astype(np.int32)\n scores = np.squeeze(scores)\n best_scores = []\n\n for idx, classID in enumerate(classes):\n if self.MODEL_NAME == 'ssdlite_mobilenet_v2_coco_2018_05_09':\n if classID == 10: # 10 is traffic light\n if scores[idx] > 0.10: #confidence level\n best_scores.append([scores[idx], idx, classID])\n detected = True\n else: # we tuned the 
model to classify only traffic lights\n if scores[idx] > 0.10: # confidence level\n best_scores.append([scores[idx], idx, classID])\n detected = True\n\n tl_index = TrafficLight.UNKNOWN\n if detected:\n best_scores.sort(key=lambda tup: tup[0], reverse=True)\n\n best_score = best_scores[0]\n rospy.logdebug(\"number of TL found %d, best score: %f, color: %f\", len(best_scores), best_score[0], best_score[2])\n nbox = boxes[best_score[1]]\n\n height = image.shape[0]\n width = image.shape[1]\n\n box = np.array([nbox[0]*height, nbox[1]*width, nbox[2]*height, nbox[3]*width]).astype(int)\n box_height = box[2] - box[0]\n box_width = box[3] - box[1]\n ratio = float(box_height)/float(box_width)\n rospy.logdebug(\"ratio: %f\", ratio)\n if ratio >= 2.0 and ratio < 3.0: #started from 2.4\n tl_cropped = image[box[0]:box[2], box[1]:box[3]]\n tl_color, tl_index = self.get_color(tl_cropped)\n #color = ['RED', 'YELLOW', 'GREEN', 'UNKNOWN']\n #tl_index = best_score[2]\n #tl_color = color[tl_index]\n #augment image with detected TLs\n cv2.rectangle(image, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_color = (255, 255, 255)\n cv2.putText(image, tl_color, (box[1], box[0]), font, 2.0, font_color, lineType=cv2.LINE_AA)\n return image, tl_index", "def sendPing(self, payload=None):" ]
[ "0.6287213", "0.6152359", "0.60468817", "0.57658076", "0.5763883", "0.56264436", "0.5618451", "0.56042737", "0.5574418", "0.5567113", "0.5565702", "0.55625457", "0.55605626", "0.553324", "0.5526417", "0.5485286", "0.5476039", "0.54632705", "0.5459137", "0.54455686", "0.5437376", "0.54235965", "0.5406025", "0.5385306", "0.5377799", "0.53760463", "0.5366128", "0.53534037", "0.53421426", "0.5335312", "0.53179544", "0.53041255", "0.5289264", "0.52807903", "0.5271425", "0.52708745", "0.5255102", "0.5252553", "0.52425265", "0.52351284", "0.52351284", "0.52351284", "0.52292883", "0.5228228", "0.5227881", "0.52277255", "0.5226395", "0.5220139", "0.5218317", "0.52057725", "0.52048004", "0.5202069", "0.51906204", "0.5189099", "0.51817477", "0.5175478", "0.51648194", "0.5152719", "0.51475984", "0.51344395", "0.5119591", "0.51177704", "0.51159227", "0.5111173", "0.50969636", "0.50969636", "0.50969636", "0.50969636", "0.50957084", "0.5078554", "0.50784236", "0.5065566", "0.50648296", "0.50624585", "0.5058884", "0.50576776", "0.5057549", "0.5046144", "0.5044781", "0.50445396", "0.5041115", "0.5030913", "0.5026244", "0.5025555", "0.502191", "0.5021531", "0.5019766", "0.50188434", "0.501813", "0.5014838", "0.50137484", "0.50083596", "0.5006964", "0.50068337", "0.5000784", "0.49958888", "0.49951482", "0.49918664", "0.49817768", "0.49774325" ]
0.6724717
0
send detection image data
def send_image(self, image_width, image_height, image):
    detection_result = []
    return self.send_detection_data(image_width, image_height, image, detection_result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_detection_data(self, image_width, image_height,\n image, detection_result):\n if self._send_buffer.full() is True:\n log_error(\"Send detection data failed for buffer is full\")\n return False\n\n image_data = None\n if isinstance(image, AclImage):\n image_data = DataBuf(image.data(), image.size).copy_to_local()\n elif isinstance(image, np.ndarray):\n image_data = image \n else:\n log_error(\"Invalid data to send\") \n return False \n\n request_msg = pm.image_frame_request(image_width, image_height,\n image_data.tobytes(),\n detection_result) \n self.send_message(request_msg) \n self._send_buffer.put(image_data) \n self._release_send_success_data()\n\n return True", "def process_image(self, data):\n try:\n\n # Convert the image from ROS format to OpenCV format\n # 'bgr8' means it will encode as 8-bit values in BGR channels\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n # Apply a threshold to your image\n cv_image = self.bound_green_object(cv_image)\n # Display the modified image\n cv2.imshow('picture', cv_image)\n cv2.waitKey(3)\n except CvBridgeError, e:\n rospy.loginfo(e)", "def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n self.ID = self.ID + 1\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n name = str(f\"{self.ID:02d}\"+\"_single.jpg\")\n cv2.imwrite(os.path.join(self.args.path_in, name), image)\n\n if (self.ID == 5):\n # Run SyntheticDataGeneration\n self.synthetic.eval()\n self.ID = 0\n # Annotate image and publish results\n current_directory_path = os.path.join(self.args.save_path, str(\"/Documents_orig/\"))\n for file in os.listdir(current_directory_path):\n name, ext = os.path.splitext(file)\n if ext == \".jpg\":\n image_file_savepath = os.path.join(current_directory_path, file)\n cv_image = cv2.imread(image_file_savepath)\n cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)\n if self.image_publisher is not None:\n image = Image(np.array(cv_image, dtype=np.uint8))\n message = self.bridge.to_ros_image(image, encoding=\"bgr8\")\n self.image_publisher.publish(message)\n for f in os.listdir(self.args.path_in):\n os.remove(os.path.join(self.args.path_in, f))", "def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n rospy.loginfo(\"image info: {}\".format(image.numpy().shape))\n\n # Run pose estimation\n boxes = self.object_detector.infer(image, threshold=0.1, keep_size=False)\n\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n\n # Convert detected boxes to ROS type and publish\n ros_boxes = self.bridge.to_ros_boxes(boxes)\n if self.bbox_publisher is not None:\n self.bbox_publisher.publish(ros_boxes)\n rospy.loginfo(\"Published face boxes\")\n\n # Annotate image and publish result\n # NOTE: converting back to OpenDR BoundingBoxList is unnecessary here,\n # only used to test the corresponding bridge methods\n odr_boxes = self.bridge.from_ros_boxes(ros_boxes)\n image = draw_bounding_boxes(image, odr_boxes, class_names=self.class_names)\n if self.image_publisher is not None:\n message = self.bridge.to_ros_image(np.uint8(image))\n self.image_publisher.publish(message)\n rospy.loginfo(\"Published annotated image\")", "def callback(self, data):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n self.image_sub.unregister()\n\n except CvBridgeError as e:\n rospy.logerr(e)\n (rows, cols, channels) = cv_image.shape\n #result = cv2.fastNlMeansDenoisingColored(cv_image, None, 20, 10, 
7, 21)\n image = cv_image\n # Resize a 720x1280 image to 360x640 to fit it on the screen\n \"\"\"resized_image = cv2.resize(image, (720 / 2, 1280 / 2))\n cv2.imshow(\"/eyrc/vb/camera_1/image_raw\", resized_image)\n rospy.loginfo(self.get_qr_data(image))\"\"\"\n _,threshold = cv2.threshold(image, 70, 255, cv2.THRESH_TRUNC)\n self.get_qr_data(threshold)\n cv2.waitKey(3)", "def process_image(self):\n\n detect.main(self.nn_args)", "def process(self, image):", "def detect_image_client(img):\n rospy.wait_for_service('detect_service') # attendende che il servizio sia pronto\n rospy.loginfo(\"Detection service invoked\")\n try:\n detect_service = rospy.ServiceProxy('detect_service', Detect) #istanzia il proxy al servizio detect_service\n msg = detect_service(img) # invoca il servizio con un'istanza di Image per ottenere un'istanza di DetectResponse\n return msg.det # restituisce l'istanza di tipo Detection2DArray prelevandola dall'oggetto DetectResponse\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\"%e)", "def send_image(self, device_id, image):\n self.logger.debug(f\"{device_id}: sending processed image!\")\n base64_img = base64.b64encode(\n cv2.imencode('.jpg', image)[1].tostring())\n self.socketio.emit(\n \"image\", {\"message\": base64_img}, room=f\"device-{device_id}\")", "def send_frame(self):\n frame = self.frame_buffer.get()\n result, jpeg = cv2.imencode(\".jpg\", frame.nparray)#, self.encode_param)\n data = numpy.array(jpeg)\n string_data = data.tostring()\n self.sock.send(str(len(string_data)).ljust(16))\n self.sock.send(string_data)", "def send_request(self, img_path):\n\n self.Helpers.logger.info(\"Sending request for: \" + img_path)\n \n _, img_encoded = cv2.imencode('.png', cv2.imread(img_path))\n response = requests.post(\n self.addr, data=img_encoded.tostring(), headers=self.headers)\n response = json.loads(response.text)\n \n return response", "def image_cb(self, msg):\n rospy.logdebug(\"TLDetector.image_cb\")\n self.__has_image = True\n self.__camera_image = msg\n\n cv_image = self.__bridge.imgmsg_to_cv2(msg, \"bgr8\")\n light_wp, state = self.__process_traffic_lights()\n if self.__mode == LABEL_MODE and not self.__classification_done and state != 4:\n self.__classification_done = self.__light_classifier.save_image(\n cv_image, state\n )\n if self.__classification_done:\n rospy.loginfo(\"TLDetector.image_cb: Done generating labels.\")\n\n \"\"\"\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n \"\"\"\n self.__publish_traffic_light_state(light_wp, state)", "def process_payload(payload):\n\n # Convertion of payload string to image array for opencv\n ret, img = make_image(payload)#ret is 0 when conversion is successful or 1 when not\n result='Unable to detect'\n if ret == 0:\n cv2.imwrite('received.png', img)\n try:\n roi = extract_roi_2(img)\n \n result = detect(roi) \n \n #write_characters(roi)\n\n except:\n result = \"----------------\"\n # # When roi is extracted its a 2d array \n \n return result", "def receive_image(self):\n code = self.socket.recv(1)\n self.verify_img_code(code)\n if code[0] == codes['timeout']:\n print(\"Ocurrió un timeout en la conexión\")\n self.close_connection()\n idpokemon = bytes_to_int(self.socket.recv(1))\n self.verify_pokemon(idpokemon)\n tam_image = bytes_to_int(self.socket.recv(4))\n f = open(\"../..\" + str(idpokemon) + \".png\", 'wb')\n l = 1\n while(l):\n l = self.socket.recv(1024)\n f.write(l)\n print(\"Se guardó una imagen del pokémon capturado en el archivo \" +\n str(idpokemon) + \".png.\")\n f.close()\n\n print(\"Sesión terminada.\")\n reply = self.socket.recv(1)\n self.close_connection()", "def send_image(self, path):\n img = cv2.imread(path)\n msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n # removed by alice\n #rospy.sleep(1)", "def video_test():\n r = request\n # convert string of image data to uint8\n nparr = np.fromstring(r.data, np.uint8)\n # decode image\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n # do some fancy processing here....\n\n # build a response dict to send back to client\n response = {'message': 'image received. 
size={}x{}'.format(img.shape[1], img.shape[0])\n }\n print(response)\n # encode response using jsonpickle\n response_pickled = jsonpickle.encode(response)\n cv2.imwrite(\"1.jpg\", img)\n print(\"done\")\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")", "def get_data(self):\n global CAM\n count = 0\n while CAM.isOpened():\n count += 1\n print('COUNT' + str(count))\n _, frame = CAM.read()\n\n # cropped face\n cropped_face, bbox_coordinate, anchor_coordinate = detect_faces(frame)\n if cropped_face is None:\n print(\"NONE FACE DETECTED\")\n sleep(1)\n continue\n\n # get fake face\n fake_face, profile_feature_vector = generate_frontal_face(cropped_face)\n\n cropped_face = cv2.cvtColor(cropped_face, cv2.COLOR_BGR2RGB)\n fake_face = cv2.cvtColor(fake_face, cv2.COLOR_BGR2RGB)\n\n # face matching\n face_matcher = FaceMatcher()\n matched_face, matched_name, matched_front_fake_face, matched_diff = \\\n face_matcher.match(cropped_face, fake_face, profile_feature_vector)\n\n matched_face = cv2.cvtColor(matched_face, cv2.COLOR_BGR2RGB)\n matched_front_fake_face = cv2.cvtColor(matched_front_fake_face, cv2.COLOR_BGR2RGB)\n\n _, cropped_face_jpeg = cv2.imencode('.jpg', cropped_face)\n _, fake_face_jpeg = cv2.imencode('.jpg', fake_face)\n _, matched_face_jpeg = cv2.imencode('.jpg', matched_face)\n _, matched_front_fake_face_jpeg = cv2.imencode('.jpg', matched_front_fake_face)\n\n encoded_cropped_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(cropped_face_jpeg.tobytes()).decode())\n encoded_fake_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(fake_face_jpeg.tobytes()).decode())\n\n encoded_matched_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(matched_face_jpeg.tobytes()).decode())\n encoded_matched_front_fake_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(matched_front_fake_face_jpeg.tobytes()).decode())\n\n # get detection model return here and send to face frontalization model\n SIO.emit('detection', {'cropped_face': encoded_cropped_face,\n 'fake_face': encoded_fake_face,\n 'matched_face': encoded_matched_face,\n 'matched_name': matched_name,\n 'matched_front_fake_face': encoded_matched_front_fake_face,\n 'id': uuid.uuid4().hex},\n namespace='/detections')\n sleep(self.delay)", "def main_recognition():\n if request.method == 'POST':\n # print(request.url)\n # stream = BytesIO(request.data)\n # image = Image.open(stream).convert(\"RGBA\")\n # path = 'C:/Users/13/Documents/FRS_v1/path.png'\n # image = image.save(path)\n # stream.close()\n #df = faces_info_export(path)\n print(request.url)\n stream = BytesIO(request.data)\n img_pil=Image.open(stream).convert(\"RGB\")\n stream.close()\n img_cv=np.array(img_pil)\n try:\n df = faces_info_export(img_cv)\n return df.to_json(orient='index')\n except SystemError as er:\n \tprint(er)\n \treturn json.dumps({'msg':'error'})\n except AttributeError as er:\n \tprint(er)\n \treturn json.dumps({'msg':'error'})\n if request.method == 'GET':\n # ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\n df = faces_info_export(\"C:/Users/13/Documents/FRS_v1/test_image.jpg\")\n return df.to_json(orient='index')", "def handle_image_data(data):\n \n #Get the incoming RGB image from the Kinect\n D.image = D.bridge.imgmsg_to_cv(data, \"bgr8\")\n\n if D.created_images == False:\n #Initialize the additional images we need for processing\n ImageProcessing.initialize(D)\n D.created_images = True\n\n # Recalculate threshold image\n ImageProcessing.threshold_image(D)\n\n # Recalculate blob in main image\n 
ImageProcessing.find_biggest_region(D)\n\n # Check on the display of dragged section\n ImageProcessing.mouse_section(D)\n\n #Display target circle\n #ImageProcessing.target_coord(D)\n \n #Display info box on image\n ImageProcessing.draw_on_image(D)\n \n #Handle incoming key presses\n key_press = cv.WaitKey(5) & 255\n if key_press != 255:\t\t\t#Handle only if it's a real key\n check_key_press(D, key_press)\t\t#(255 = \"no key pressed\")\n\n #Update the displays:\n #Show main image in the image window\n #cv.ShowImage('Image', D.image)\n\n #Show threshold image in the threshold window 3currentThreshold = getattr(D, D.current_threshold)\n cv.ShowImage('Threshold', currentThreshold)", "def capture_image(self, data={}):\n # call self.increment_count() after each image saved\n pass", "def sendDetection(self, idData, classes, aux=None):\n self.dp.URL = self.URL\n self.dp.sendDetection(classifier=self.Config[\"MACHINE_NAME\"], \n idData=idData, classes=classes, aux=aux)", "def detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, submit_dir)\n os.makedirs(submit_dir)\n\n # Read dataset\n dataset = TamperDataset()\n dataset.load_tamper(dataset_dir, subset)\n dataset.prepare()\n # Load over images\n submission = []\n f1 = 0\n print(len(dataset.image_ids))\n # for image_id in dataset.image_ids:\n # # Load image and run detection\n # image = dataset.load_image(image_id)\n # # Detect objects\n # r = model.detect([image], verbose=0)[0]\n\n # # Encode image to RLE. Returns a string of multiple lines\n # source_id = dataset.image_info[image_id][\"id\"]\n # rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n # submission.append(rle)\n # # Save image with masks\n\n # N = r[\"scores\"].shape[0]\n # if not N:\n # \tH, W, C = image.shape\n # \tmask = np.zeros((H,W))\n\n \t\n # else:\n\n # H, W, C = image.shape\n\n # idx = np.argsort(-r[\"scores\"])\n # mask = r[\"masks\"][:,:,idx[0]].astype(np.float32)\n\n # bbox = r[\"rois\"][idx[0], :4]\n\n # y1, x1, y2, x2 = bbox\n\n\n\n # mask = dense_crf(image, mask)\n\n # mask = np.where(mask >= 0.5, 255, 0)\n\n # H, W, C = image.shape\n\n # full_mask = np.zeros((H, W))\n # full_mask[y1:y2, x1:x2] = mask\n\n for image_id in dataset.image_ids:\n # Load image and run detection\n image = dataset.load_image(image_id)\n # ela=dataset.load_ela(image_id)\n # Detect objects\n # r = model.detect([image],[ela], verbose=0)[0]\n r = model.detect([image],verbose=0)[0]\n\n # Encode image to RLE. 
Returns a string of multiple lines\n source_id = dataset.image_info[image_id][\"id\"]\n rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n submission.append(rle)\n # Save image with masks\n\n N = r[\"scores\"].shape[0]\n if not N:\n H, W, C = image.shape\n mask = np.zeros((H,W))\n\n \n else:\n idx = np.argsort(-r[\"scores\"])\n mask = r[\"masks\"][:,:,idx[0]].astype(np.uint8)\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n\n annotation = dataset.load_annaation(image_id)\n annotation = np.where(annotation >= 0.5, 1, 0) \n f = get_FM(mask, annotation)\n f1 += f\n\n print(f1/len(dataset.image_ids))\n\n\n\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n # visualize.display_instances(\n # image, r['rois'], r['masks'], r['class_ids'],\n # dataset.class_names, r['scores'],\n # show_bbox=False, show_mask=False,\n # title=\"Predictions\")\n # plt.savefig(\"{}/{}.png\".format(submit_dir, dataset.image_info[image_id][\"id\"]))\n\n # Save to csv file\n # submission = \"ImageId,EncodedPixels\\n\" + \"\\n\".join(submission)\n # file_path = os.path.join(submit_dir, \"submit.csv\")\n # with open(file_path, \"w\") as f:\n # f.write(submission)\n print(\"Saved to \", submit_dir)", "def main():\n # Set up socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('localhost', 12345))\n dat = b''\n dataSegement = [0] * 5\n\n while True:\n seg, addr = s.recvfrom(MAX_DGRAM)\n print(\"type: \", type(seg))\n chunk_number = struct.unpack(\"B\", seg[0:1])[0]\n if chunk_number > 1:\n print(\"chunk_number: \", chunk_number)\n dat += seg[1:]\n else:\n dat += seg[1:]\n img = cv2.imdecode(np.frombuffer(dat, dtype=np.uint8), 1)\n cv2.imwrite(\"image/4k_image_sample_compressed.jpg\", img)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n dat = b\"\"", "def image_cb(self, msg): # incoming image\n self.has_image = True\n self.camera_image = msg", "def raw_image_callback(self, msg):\n if self.pictures_to_take and not self.detection_to_receive:\n self.pictures_to_take -= 1\n # so let's analyse it here and then delete the subscription\n rows = msg.height\n step = msg.step\n cols = msg.width\n dim = int(step / cols)\n pixels = msg.data # of size (steps, nrows)\n # save the image (later we will need to analyse it)\n vision_utils.save_picture(pixels, rows, cols, dim, self.name, FOLDER)", "def _handle_image(self, image_msg):\n # converting the ROS image message to CV2-image\n image = self._cv_bridge.imgmsg_to_cv2(image_msg, 'bgr8')\n\n # Skip if image is None\n if image is None:\n rospy.logdebug(\"Image content is None :(\", logger_name=\"vision\")\n return\n\n # Check if its the first image callback\n if self._first_image_callback:\n # Check if a cap may be on the camera\n self._handle_forgotten_camera_cap(image)\n\n # Instances that should be notified with the new image\n internal_image_subscribers =[\n self._field_color_detector,\n self._white_color_detector,\n self._red_color_detector,\n self._blue_color_detector,\n self._unknown_obstacle_detector,\n self._field_boundary_detector,\n self._obstacle_detector,\n self._red_obstacle_detector,\n self._blue_obstacle_detector,\n self._goalpost_detector,\n self._line_detector,\n self._ball_detector,\n self._debug_image_creator,\n ]\n\n # Distribute the image to the detectors\n # Iterate over subscribers\n for vision_object in internal_image_subscribers:\n # Send image\n vision_object.set_image(image)\n\n # Check if the vision should run the conventional and neural net part parallel\n if 
self._config['vision_parallelize']:\n # Create and start threads for conventional calculation and neural net\n #fcnn_thread = Thread(target=self._ball_detector.compute)\n\n conventional_thread = Thread(target=self._conventional_precalculation())\n\n conventional_thread.start()\n #fcnn_thread.start()\n\n # Wait for both threads\n conventional_thread.join()\n #fcnn_thread.join()\n else:\n # Calc conventional calculation and neural net\n self._ball_detector.compute()\n self._conventional_precalculation()\n\n ########\n # Ball #\n ########\n\n # Get a number of top balls under the field boundary, which have an high enough rating\n all_balls = self._ball_detector.get_top_candidates(count=self._max_balls)\n balls_under_field_boundary = \\\n self._field_boundary_detector.candidates_under_convex_field_boundary(\n all_balls,\n self._ball_candidate_y_offset)\n top_balls = candidate.Candidate.rating_threshold(\n balls_under_field_boundary,\n self._ball_candidate_threshold)\n # check whether there are ball candidates\n if top_balls:\n # Convert ball cancidate list to ball message list\n list_of_balls = map(ros_utils.build_ball_msg, top_balls)\n # Create balls msg with the list of balls\n balls_msg = ros_utils.build_balls_msg(image_msg.header, list_of_balls)\n # Publish balls\n self._pub_balls.publish(balls_msg)\n\n # Debug draw all ball candidates\n self._debug_image_creator.draw_ball_candidates(\n all_balls,\n (0, 0, 255))\n # Debug draw possible ball candidates under the field boundary\n self._debug_image_creator.draw_ball_candidates(\n balls_under_field_boundary,\n (0, 255, 255))\n # Debug draw top ball candidate\n self._debug_image_creator.draw_ball_candidates(\n top_balls,\n (0, 255, 0),\n thickness=2)\n\n #############\n # Obstacles #\n #############\n\n # Init list for obstacle msgs\n list_of_obstacle_msgs = []\n # Add red obstacles\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.ROBOT_MAGENTA,\n self._red_obstacle_detector.get_candidates()))\n # Add blue obstacles\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.ROBOT_CYAN,\n self._blue_obstacle_detector.get_candidates()))\n # Add UFO's (Undefined Found Obstacles)\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.UNDEFINED,\n self._unknown_obstacle_detector.get_candidates()))\n # Build obstacles msgs containing all obstacles\n obstacles_msg = ros_utils.build_obstacle_array_msg(image_msg.header, list_of_obstacle_msgs)\n # Publish obstacles\n self._pub_obstacle.publish(obstacles_msg)\n\n # Debug draw unknown obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._unknown_obstacle_detector.get_candidates(),\n (0, 0, 0),\n thickness=3)\n # Debug draw red obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._red_obstacle_detector.get_candidates(),\n (0, 0, 255),\n thickness=3)\n # Debug draw blue obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._blue_obstacle_detector.get_candidates(),\n (255, 0, 0),\n thickness=3)\n\n ########\n # Goal #\n ########\n\n # Get all goalposts under field boundary\n goal_posts = self._field_boundary_detector.candidates_under_convex_field_boundary(\n self._goalpost_detector.get_candidates(),\n self._goal_post_field_boundary_y_offset)\n\n # Get goalpost msgs and add them to the detected goal posts list\n goal_post_msgs = ros_utils.build_goal_post_msgs(goal_posts)\n # Create goalposts msg\n goal_posts_msg = ros_utils.build_goal_post_array_msg(image_msg.header, goal_post_msgs)\n # 
Check if there is a goal\n if goal_posts_msg:\n # If we have a goal, lets publish it\n self._pub_goal_posts.publish(goal_posts_msg)\n\n # Debug draw all goal posts\n self._debug_image_creator.draw_obstacle_candidates(\n self._goalpost_detector.get_candidates(),\n (180, 180, 180),\n thickness=3)\n # Debug draw goal posts which start in the field\n self._debug_image_creator.draw_obstacle_candidates(\n goal_posts,\n (255, 255, 255),\n thickness=3)\n\n #########\n # Lines #\n #########\n if self._use_line_points:\n # Build a LineSegmentInImage message for each linepoint\n line_points = self._line_detector.get_linepoints()\n # Create line segments\n line_segments = ros_utils.convert_line_points_to_line_segment_msgs(line_points)\n # Create line msg\n line_msg = ros_utils.build_line_information_in_image_msg(image_msg.header, line_segments)\n # Publish lines\n self._pub_lines.publish(line_msg)\n\n # Draw debug line points\n self._debug_image_creator.draw_points(\n line_points,\n (0, 0, 255))\n\n if self._use_line_mask:\n # Define detections (Balls, Goal Posts) that are excluded from the line mask\n excluded_objects = top_balls + goal_posts\n # Get line pixel mask\n line_mask = self._line_detector.get_line_mask_without_other_objects(excluded_objects)\n # Create line mask message\n line_mask_message = ros_utils.build_image_msg(image_msg.header, line_mask, '8UC1')\n # Publish line mask\n self._pub_line_mask.publish(line_mask_message)\n\n # Draw debug line mask\n self._debug_image_creator.draw_mask(\n line_mask,\n color=(255, 0, 0),\n opacity=0.8)\n\n ##################\n # Field boundary #\n ##################\n\n # Get field boundary msg\n convex_field_boundary = self._field_boundary_detector.get_convex_field_boundary_points()\n # Build ros message\n convex_field_boundary_msg = ros_utils.build_field_boundary_polygon_msg(image_msg.header, convex_field_boundary)\n # Publish field boundary\n self._pub_convex_field_boundary.publish(convex_field_boundary_msg)\n\n # Debug draw convex field boundary\n self._debug_image_creator.draw_field_boundary(\n convex_field_boundary,\n (0, 255, 255))\n # Debug draw field boundary\n self._debug_image_creator.draw_field_boundary(\n self._field_boundary_detector.get_field_boundary_points(),\n (0, 0, 255))\n\n #########\n # Debug #\n #########\n '''\n if self._config['neural_network_type'] == 'fcnn':\n # Publish fcnn output for the region of interest under the field boundary (for the world model)\n if self._ball_fcnn_publish_output:\n roi_msg = ros_utils.build_fcnn_region_of_interest(\n self._ball_detector.get_fcnn_output(),\n self._field_boundary_detector,\n image_msg.header,\n self._config['ball_fcnn_publish_field_boundary_offset'])\n self._pub_ball_fcnn.publish(roi_msg)\n\n # Publish whole fcnn output for debug purposes\n if self._publish_fcnn_debug_image:\n self._pub_debug_fcnn_image.publish(self._ball_detector.get_debug_image())\n '''\n # Check, if HSV mask images should be published\n if self._publish_HSV_mask_image:\n # Mask images\n white_mask = self._white_color_detector.get_mask_image()\n red_mask = self._red_color_detector.get_mask_image()\n blue_mask = self._blue_color_detector.get_mask_image()\n\n # Publish mask images\n self._pub_white_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, white_mask, '8UC1'))\n self._pub_red_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, red_mask, '8UC1'))\n self._pub_blue_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, blue_mask, '8UC1'))\n\n # Check, if field mask image 
should be published\n if self._publish_field_mask_image:\n if isinstance(self._field_color_detector, color.DynamicPixelListColorDetector):\n # Mask image\n dyn_field_mask = self._field_color_detector.get_mask_image()\n static_field_mask = self._field_color_detector.get_static_mask_image()\n # Publish mask image\n self._pub_dynamic_color_lookup_table_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, dyn_field_mask, '8UC1'))\n self._pub_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, static_field_mask, '8UC1'))\n else:\n # Mask image\n field_mask = self._field_color_detector.get_mask_image()\n # Publish mask image\n self._pub_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, field_mask, '8UC1'))\n\n # Check if we should draw debug image\n if self._debug_image_creator.active:\n # publish debug image\n self._pub_debug_image.publish(\n ros_utils.build_image_msg(\n image_msg.header,\n self._debug_image_creator.get_image(),\n 'bgr8'))", "def send_image(path):\n img = cv2.imread(path)\n msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n rospy.sleep(1)", "def send_image(path):\n img = cv2.imread(path)\n msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n rospy.sleep(1)", "def callback(self,data):\n self.cvtImage(data)\n\n \"\"\" Do some image processing; flip, resize, and etc\"\"\"\n self.imgProcessing()\n\n \"\"\" displaying an OpenCV image \"\"\"\n cv2.imshow(self.cv_window_name, self.cv_image)\n cv2.waitKey(1)\n# ------------------------------------------------------------------------------\n\n try:\n \"\"\" coverting the uint8 OpenCV image to ROS image data \"\"\"\n \"\"\" Publisher.publish() -- explicit way \"\"\"\n self.image_pub.publish(self.bridge.cv2_to_imgmsg(self.cv_image, \"bgr8\"))\n except CvBridgeError as e:\n print(e)", "def detect(self, input_image):\n self.t.start()\n frame = self.convert_image(input_image)\n frame = cv2.pyrDown(frame)\n\n img, confidence, x, y = self.detector.detect(frame)\n print('Detection:', confidence, x, y)\n det = Target_coordinates()\n det.confidence = confidence\n det.x = x\n det.y = y\n self.pub_detection.publish(det)\n self.pub_fpv.publish(self.bridge.cv2_to_imgmsg(img))\n cv2.imwrite('frames/frame%d.jpg' % self.frame_num, img)\n self.frame_num += 1\n self.t.end()\n # Display\n cv2.imshow(self.iw, img)\n key = cv2.waitKey(30) & 0xFF\n if key == 27:\n cv2.destroyAllWindows()\n sys.exit(27)", "def post_image(img_file):\n img = np.array(Image.open(img_file))\n data={\"images\":img.tolist(),\"batch_size\":1}\n json_data=json.dumps(data)\n img=np.array(json.loads(json_data)[\"images\"])\n headers={}\n URL=\"http://127.0.0.1:8080/invocations\"\n response = requests.post(URL, data=json_data, headers=headers)\n return response", "def http_classify(self, req):\n \n if len(req.files) != 0:\n img = np.fromstring(req.files['file'].read(), np.uint8)\n else:\n img = np.fromstring(req.data, np.uint8)\n \n img = cv2.imdecode(img, cv2.IMREAD_UNCHANGED)\n img = cv2.resize(img, (self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"], \n self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"]))\n img = self.reshape(img)\n \n return self.get_predictions(img)", "def test_http_classifier(self):\n \n msg = \"\"\n \n 
files = 0\n tp = 0\n fp = 0\n tn = 0\n fn = 0\n\n self.addr = \"http://\" + self.Helpers.confs[\"cnn\"][\"api\"][\"server\"] + \\\n ':'+str(self.Helpers.confs[\"cnn\"][\"api\"][\"port\"]) + '/Inference'\n self.headers = {'content-type': 'image/jpeg'}\n\n for data in os.listdir(self.testing_dir):\n if os.path.splitext(data)[1] in self.valid:\n \n response = self.send_request(self.testing_dir + \"/\" + data)\n\n msg = \"\"\n if response[\"Diagnosis\"] == \"Positive\" and \"_1.\" in data:\n tp += 1\n msg = \"Acute Lymphoblastic Leukemia correctly detected (True Positive)\"\n elif response[\"Diagnosis\"] == \"Positive\" and \"_0.\" in data:\n fp += 1\n msg = \"Acute Lymphoblastic Leukemia incorrectly detected (False Positive)\"\n elif response[\"Diagnosis\"] == \"Negative\" and \"_0.\" in data:\n tn += 1\n msg = \"Acute Lymphoblastic Leukemia correctly not detected (True Negative)\"\n elif response[\"Diagnosis\"] == \"Negative\" and \"_1.\" in data:\n fn += 1\n msg = \"Acute Lymphoblastic Leukemia incorrectly not detected (False Negative)\" \n \n files += 1\n \n self.Helpers.logger.info(msg)\n print()\n time.sleep(7)\n \n self.Helpers.logger.info(\"Images Classifier: \" + str(files))\n self.Helpers.logger.info(\"True Positives: \" + str(tp))\n self.Helpers.logger.info(\"False Positives: \" + str(fp))\n self.Helpers.logger.info(\"True Negatives: \" + str(tn))\n self.Helpers.logger.info(\"False Negatives: \" + str(fn))", "def callback(self,data):\n # convert image into openCV format\n bridge = CvBridge()\n try:\n # bgr8 is the pixel encoding -- 8 bits per color, organized as blue/green/red\n cv_image = bridge.imgmsg_to_cv(data, \"bgr8\")\n except CvBridgeError, e:\n # all print statements should use a rospy.log_ form, don't print!\n rospy.loginfo(\"Conversion failed\")\n\n # we could do anything we want with the image here\n # for now, we'll blur using a median blur\n cv2.Smooth(cv_image, cv_image, smoothtype=cv.CV_MEDIAN, param1=31, param2=0, param3=0, param4=0)\n\t\n\t\n\tret,th1 = cv2.threshold(cv_image,127,255,cv2.THRESH_BINARY)\n\tth2 = cv2.adaptiveThreshold(cv_image,255,cv2.ADAPTIVE_THRESH_MEAN_C,\\\n\t\t cv2.THRESH_BINARY,11,2)\n\tth3 = cv2.adaptiveThreshold(cv_image,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n\t\t cv2.THRESH_BINARY,11,2)\n\n\ttitles = ['Original Image', 'Global Thresholding (v = 127)',\n\t\t 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']\n\timages = [cv_image, th1, th2, th3]\n\t\n\tfor i in xrange(4):\n\t\tplt.subplot(2,2,i+1),plt.imshow(images[i],'gray')\n\t\tplt.title(titles[i])\n\t\tplt.xticks([]),plt.yticks([])\n\tplt.show()\n\t\n '''# show the image\n cv2.ShowImage(\"image_view\", cv_image)\n cv2.WaitKey(3)\n\t'''", "def send_image(path):\n img = cv.LoadImage(path)\n msg = cv_bridge.CvBridge().cv_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n rospy.sleep(1)", "def test1234():\n r = request\n #\n data = uncompress_nparr(r.data) #uncompress data\n print(\"data type:{}\", type(data))\n #nparr = np.frombuffer(r.data, np.uint8)\n\n is_success, buffer = cv2.imencode(\".jpg\", data)\n io_buf = io.BytesIO(buffer)\n decode_img = cv2.imdecode(np.frombuffer(io_buf.getbuffer(), np.uint8), -1) # image\n #img = cv2.imdecode(nparr , cv2.IMREAD_COLOR)\n img_name = \"Received_JuanJoxe{}.png\".format(img_counter)\n\n cv2.imwrite(os.path.join(uploads_dir, img_name), decode_img)\n\n #\n data10 = data*10\n print(\"\\n\\nReceived array (compressed size = \"+\\\n 
str(r.content_length)+\"):\\n\"+str(data))\n resp, _, _ = compress_nparr(data)\n response = {'message': 'image received. size={}x{} name:{}'.format(decode_img.shape[1], decode_img.shape[0], img_name)} #this is json\n print('message image received. size={}x{} name:{}'.format(decode_img.shape[1], decode_img.shape[0], img_name))\n\n\n # encode response using jsonpickle\n response_pickled = jsonpickle.encode(response)\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")", "def send_jpg_frame_REP_watcher(self, text, image):\n\n ret_code, jpg_buffer = cv2.imencode(\n \".jpg\", image, [int(cv2.IMWRITE_JPEG_QUALITY),\n self.jpeg_quality])\n self.REQ_sent_time.append(datetime.utcnow()) # utcnow 2x faster than now\n try:\n hub_reply = self.sender.send_jpg(text, jpg_buffer)\n except: # add more specific exception, e.g. ZMQError, after testing\n print(\"Exception at sender.send_jpg in REP_watcher function.\")\n self. fix_comm_link()\n self.REP_recd_time.append(datetime.utcnow())\n return hub_reply", "def vr_http_classify(self, img):\n\n img = cv2.resize(img, (self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"], \n self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"]))\n img = self.reshape(img)\n \n return self.get_predictions(img)", "def _ros_image_callback(self, msg: Image):\n cv2_img = self._cv_bridge.imgmsg_to_cv2(msg, \"bgr8\")\n self._telegram_updater.bot.send_photo(\n self._telegram_chat_id,\n photo=BytesIO(cv2.imencode(\".jpg\", cv2_img)[1].tobytes()),\n caption=msg.header.frame_id,\n )", "def onImageReceived(self, msg):\n\n self.BGR = self.bridge.imgmsg_to_cv2(msg)\n self.processImage(self.BGR)", "def serve_inference_requests():\n global image_queue\n\n with tf.Session() as sess:\n while True:\n image_data = image_queue.get()\n\n tensor = sess.graph.get_tensor_by_name('final_result:0')\n predictions = sess.run(tensor, {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n top_k = predictions.argsort()[-NUM_PREDICTIONS:][::-1]\n\n human_string = labels[top_k[0]]\n score = predictions[top_k[0]]\n logging.info('%s classified with score %.5f', human_string, score)\n\n emit_image = False\n if human_string != 'nothing':\n emit_image = True\n logging.debug('emitting image cause %s was detected', human_string)\n elif score <= config['inference']['threshold']:\n emit_image = True\n logging.debug('emitting image cause score %.5f is below threshold of %s',\n score, config['inference']['threshold'])\n else:\n logging.debug('image not emitted, cause nothing was detected with a probability of %.5f',\n score)\n\n if emit_image:\n mqtt_publish(image_data)\n else:\n save_image(image_data)", "def send_data(self, goal_image=None):\n formatted_data = str(self.format_data(goal_image))\n self.sock.sendto(formatted_data.replace(' ', '') + '\\n', self._address)", "def image_handler(self, bot, update):\n text = update.message.text\n if text.startswith('/recon'):\n bot.sendMessage(chat_id=update.message.chat_id, text='*Object recognition*\\nSend me an image',\n parse_mode=ParseMode.MARKDOWN)\n return 10\n elif text.startswith('/faces'):\n bot.sendMessage(chat_id=update.message.chat_id, text='*Face recognition*\\nSend me an image',\n parse_mode=ParseMode.MARKDOWN)\n return 11", "def image_callback(self, data):\n\n # Import raw image and convert to cv2 format\n frame = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n # Convert to grayscale (AprilTag detector prefers this)\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n detections = self.detector.detect(gray, return_image 
= False)\n\n num_detections = len(detections)\n rospy.loginfo('Detected {} tags'.format(num_detections))\n # Check to see if any detections\n if (not detections):\n\t\t self.pub.publish(None,None, None)\n\t\t return\n\n\t detection = detections[0]\n retval, rvec, tvec = cv2.solvePnP(self.opts, detection.corners,\n self.cmatrix, self.dists)\n # Update moving average with 60% old, 40% new\n self.tvec_avg = 0.6 * self.tvec_avg + 0.4 * tvec\n # publish the center of the tag.\n self.pub.publish(self.tvec_avg[0], self.tvec_avg[1], self.tvec_avg[2])", "def send_image_frame_REP_watcher(self, text, image):\n\n self.REQ_sent_time.append(datetime.utcnow()) # utcnow 2x faster than now\n try:\n hub_reply = self.sender.send_image(text, image)\n except: # add more specific exception, e.g. ZMQError, after testing\n print(\"Exception at sender.send_image in REP_watcher function.\")\n self. fix_comm_link()\n self.REP_recd_time.append(datetime.utcnow())\n return hub_reply", "def image_test_case(img, expected_results, info_string):\n global passed_count, failed_count\n\n path = TEST_IMGS + img\n\n print(\"\\n\\nTEST: {}\".format(info_string))\n print(\"\\nTesting image handling of {}\".format(path))\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.connect((HOST, PORT))\n\n with open(path, 'rb') as f:\n img_bytes = f.read()\n\n sock.send(START)\n sock.send(GPS)\n sock.send(b'51.5138')\n sock.send(LONG)\n sock.send(b'-0.09847899999999754')\n sock.send(SOF)\n sock.send(img_bytes)\n sock.send(END_MESSAGE)\n\n response_1 = sock.recv(4)\n response_2 = sock.recv(4)\n responses = [response_1, response_2]\n\n for expected in expected_results:\n if expected not in responses:\n print(\"\\n\\tResult: FAILED.\")\n print(\"Expected server response {}. 
Received {}.\".format(\n expected_results, responses))\n failed_count += 1\n return\n\n print(\"\\n\\tResult: PASSED.\\n\")\n passed_count += 1", "def classifier():\n\tprint(\"Classifying\")\n\t#initialize important variables\n\tminConfidence = 0.5\n\tthresholdValue = 0.3\n\t\n\t\"\"\"\n\tfile = request.files#['image']\n\tfile.save(\"./classifier_image.jpg\")\n\tframe = cv2.imread(\"./classifier_image.jpg\")\n\t\"\"\"\n\tfile = request.json\n\tframe = np.array(file[\"Frame\"], dtype = \"uint8\") \n\n\t#file = request.files['image']\n\t#file.save(\"./classifier_image.jpg\")\n\t#frame = cv2.imread(\"./classifier_image.jpg\")\n\t#file = request.json\n\t#frame = np.array(file[\"contour\"], dtype=\"uint8\")\n\t\n\t#Get Image dimensions\n\timage = cv2.copyMakeBorder(frame, 30, 30, 30, 30, cv2.BORDER_CONSTANT, value=255)\n\t(H, W) = image.shape[:2]\n\t\n\t#Get the output layers parameters\n\tln = net.getLayerNames()\n\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\t\n\t#Create a blob to do a forward pass\n\tblob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\tnet.setInput(blob)\n\t#print(H, W)\n\tlayerOutputs = net.forward(ln)\n\tprint(type(net))\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\tfor output in layerOutputs:\n\t\tprint(\"detecting\")\n\t\t#loop over each detection\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability) of\n\t\t\t# the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > minConfidence:\n\t\t\t\t# scale the bounding box coordinates back relative to the\n\t\t\t\t# size of the image, keeping in mind that YOLO actually\n\t\t\t\t# returns the center (x, y)-coordinates of the bounding\n\t\t\t\t# box followed by the boxes' width and height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top and\n\t\t\t\t# and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates, confidences,\n\t\t\t\t# and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping bounding\n\t# boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, minConfidence, thresholdValue)\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\toutput = json.load(open(outputFile))\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\tprint(LABELS[classIDs[i]], output[LABELS[classIDs[i]]]+1, confidences[i])\n\t\t\toutput[LABELS[classIDs[i]]]+=1\n\t\t\n\t\tjson.dump(output, open(outputFile, \"w\"))\n\t\treturn LABELS[classIDs[i]]\n\telse:\n\t\treturn Response(status=200)", "def detect_object():\n response = None\n try:\n # logger.info(request.Form)\n if request.files['base_image'] is not None:\n base_img = cv2.imdecode(np.fromstring(request.files['base_image'].read(), np.uint8), cv2.IMREAD_UNCHANGED)\n\n if base_img is not None:\n response = 
predictionService.verify(base_img=base_img)\n else:\n response = BaseResponse(code=400, reason='base_image cannot be null')\n except Exception as e:\n logger.error(e)\n response = BaseResponse(code=500, reason=\"Internal server error occurred. refer to logs\")\n\n return response.toJSON()", "def image_received(self, image_message):\n # Convert the image message to something usable by opencv\n # http://wiki.ros.org/cv_bridge/Tutorials/ConvertingBetweenROSImagesAndOpenCVImagesPython\n # Note that mono8 and bgr8 are the two image encodings expected by most OpenCV functions.\n cv_image = self.bridge.imgmsg_to_cv2(image_message, desired_encoding=\"bgr8\")\n image_data = extract_data(cv_image)\n linear_velocity, angular_velocity = self.clf.predict(image_data)\n self.cmd_vel = Twist(linear=Vector3(x=linear_velocity), angular=Vector3(z=angular_velocity))\n rospy.loginfo(self.cmd_vel)", "def image_bot_callback(msg):\n global img_bot, sub_sampling, img_bot_res\n arr = np.fromstring(msg.data, np.uint8)\n img_bot = cv.resize(cv.imdecode(arr, 1), (0, 0),\n fx=sub_sampling, fy=sub_sampling)\n himg, wimg = img_bot.shape[:2]\n img_bot = cv.resize(img_bot, (int(wimg/3), int(himg/3)))\n img_bot_res = img_bot.copy()", "def process_image(self, msg):\n self.cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding=\"bgr8\")\n self.edge_detected = cv2.Canny(self.cv_image,self.minVal,self.maxVal)\n if cv2.__version__.startswith('3.'):\n _, self.contours,_ = cv2.findContours(self.edge_detected, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n else:\n self.contours,_ = cv2.findContours(self.edge_detected, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n self.contour_image = cv2.drawContours(self.cv_image, self.contours, -1, (0,255,0), 3)\n for i in range(len(self.contours)):\n temp = self.dp(self.contours[i], 20)\n self.res.append(len(temp))\n if len(temp) == 7:\n for i in range(0,len(temp)-1,2):\n cv2.line(self.contour_image, (temp[i][0],temp[i][1]),(temp[i+1][0], temp[i+1][1]), (0,0,255), 5)\n if len(temp) == 5:\n for i in range(0,len(temp)-1,2):\n cv2.line(self.contour_image, (temp[i][0],temp[i][1]),(temp[i+1][0], temp[i+1][1]), (255,0,0), 5)", "def __update_image(self, image_msg: Image):\n self.image = self.bridge.imgmsg_to_cv2(image_msg, desired_encoding='rgb8')\n\n if self.__listener != None:\n self.__listener(self.image,image_msg.header.stamp)", "def save_detection(self, image):\n\t\timg = self.visualize_detection(image)\n\t\timg = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\t\tcv2.imwrite(f'{SAVE_PATH}{self.clip}{self.num_save}.jpg', img)", "def image_inference(self, model_name: str, input_data):\n exec_net, image_input, image_info_input, (n, c, h, w), postprocessor = self.model_loading.load_model(model_name)\n cap, visualizer, tracker, presenter = self.image_visualizer.visualizer(input_data,model_name)\n\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n # Resize the image to keep the same aspect ratio and to fit it to a window of a target size.\n scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1])\n input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y)\n\n input_image_size = input_image.shape[:2]\n input_image = np.pad(input_image, ((0, h - input_image_size[0]),\n (0, w - input_image_size[1]),\n (0, 0)),\n mode='constant', constant_values=0)\n # Change data layout from HWC to CHW.\n input_image = input_image.transpose((2, 0, 1))\n input_image = input_image.reshape((n, c, h, w)).astype(np.float32)\n input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 
1]], dtype=np.float32)\n # Run the net.\n feed_dict = {image_input: input_image}\n if image_info_input:\n feed_dict[image_info_input] = input_image_info\n outputs = exec_net.infer(feed_dict)\n # Parse detection results of the current request\n scores, classes, boxes, masks = postprocessor(\n outputs, scale_x, scale_y, *frame.shape[:2], h, w, 0.5)\n os.remove(input_data.filename)\n class_labels = self.fetch_labels.get_labels(model_name)\n\n t = 0\n for key2 in [class_labels[i] for i in classes]:\n x1 = str(boxes[t][0])\n y1 = str(boxes[t][1])\n x2 = str(boxes[t][2])\n y2 = str(boxes[t][3])\n\n if key2 in self.prediction.keys():\n value_init = self.prediction.get(key2)\n self.prediction[key2] = x1, y1, x2, y2\n value = value_init, self.prediction.get(key2)\n self.prediction[key2] = value\n\n else:\n self.prediction[key2] = x1, y1, x2, y2\n\n t = t + 1\n\n with open('./final_json.json', 'w') as file:\n json.dump(self.prediction, file)\n\n with open('./final_json.json','r') as file:\n json_object = json.load(file)\n\n return json_object\n cv2.destroyAllWindows()\n cap.release()", "def process_image(self):\n pass", "def predict(self, request):\r\n f = request.files['image']\r\n \r\n img = Image.open(f)\r\n \r\n image = img.convert('RGB')\r\n \r\n image_np = load_image_into_numpy_array(image)\r\n output_dict = run_inference_for_single_image(model, image_np)\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image_np,\r\n output_dict['detection_boxes'],\r\n output_dict['detection_classes'],\r\n output_dict['detection_scores'],\r\n category_index,\r\n instance_masks=output_dict.get('detection_masks_reframed', None),\r\n use_normalized_coordinates=True,\r\n line_thickness=2, \r\n min_score_thresh=0.45, \r\n skip_scores=True)\r\n \r\n result_image = Image.fromarray(image_np)\r\n \r\n raw_bytes = BytesIO()\r\n result_image.save(raw_bytes, \"PNG\")\r\n \r\n return base64.b64encode(raw_bytes.getvalue()).decode(\"utf-8\")", "def image_callback(img_msg):\n bridge = CvBridge()\n try:\n # Convert from sensor_msgs::Image to cv::Mat\n \tcv_image = bridge.imgmsg_to_cv2(img_msg, desired_encoding=\"passthrough\")\n \t# Access global variable and store image as numpy.array\n \tglobal _last_image\n \t_last_image = np.asarray(cv_image)\n except CvBridgeError as ex:\n\tprint ex", "def detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n #submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, \"submit\")\n #os.makedirs(submit_dir)\n\n # Read dataset\n img_ids = []\n dataset_dir = os.path.join(dataset_dir, subset)\n image_file = os.listdir(dataset_dir)\n #submission = []\n for img in image_file:\n if not img.startswith('.'):\n img_file = os.path.join(dataset_dir, img)\n image = skimage.io.imread(img_file)\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # Detect object\n\t\t\t\n r = model.detect([image])[0]\n # Encode image to RLE. 
Returns a string of multiple lines\n source_id = img.split(\".\")[0]\n #rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n #submission.append(rle)\n # Save image with masks\n visualize.display_instances(\n image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'],\n #show_bbox=False, show_mask=False,\n title=\"Predictions\")\n plt.savefig(\"{}/{}.png\".format(submit_dir, source_id))\n\n\n\t\t\n # Save to csv file", "def camera_callback(self, data):\n try:\n self.camera_buffer.put(data.images)\n self.publish_sensor_message()\n except Exception as ex:\n self.get_logger().error(f\"Error in camera callback: {ex}\")", "def imageCallback(self, msg):\n if self.current_exposure and (not self.current_exposure in self.scores):\n self.img = self.bridge.imgmsg_to_cv2(msg, \"bgr8\")\n self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)\n\n self.scores[self.current_exposure] = self.computeScore(self.img)\n #rospy.loginfo('Score: {}'.format(self.scores[self.current_exposure]))", "def cam_callback(msg):\n #cam_window_name = \"Baxter Video Feed\"\n bridge = CvBridge() #instantiate CvBridge\n img_bgr = bridge.imgmsg_to_cv2(msg, \"bgr8\") #ROS Image msg to OpenCV2\n self.img = img_bgr", "def model(msg):\n url = 'https://southcentralus.api.cognitive.microsoft.com/customvision/v3.0/Prediction/\\\n eff56ac8-0f36-41d9-93a9-da19396b0f30/detect/iterations/Iteration2_ppl_focus/image'\n headers = {\n 'Prediction-Key': os.getenv('AZURE_VIS_KEY'),\n 'Content-Type': 'application/octet-stream'\n }\n r = requests.post(url=url, headers=headers, data=msg['img'])\n predictions = r.json()\n print('Number of object predictions: {}'.format(\n len(predictions['predictions'])))\n print('Frame Number:', msg['frame_num'],\n 'Image Dimensions:', np.array(Image.open(BytesIO(msg['img']))).shape)\n Coord_matrix = Bbox(predictions)\n return Coord_matrix, len(predictions['predictions'])", "def yolo_detection(raw_image):\n class_ids = []\n confidences = []\n boxes = []\n height , width ,c= raw_image.shape\n blob = cv2.dnn.blobFromImage(raw_image, 0.00392, (416,416), (0,0,0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.4:\n center_x = int(detection[0]*width)\n center_y = int(detection[1]*height)\n w = int(detection[2]*width)\n h = int(detection[3]*height)\n ##Rectangle Draw\n topleft_x = int(center_x-(w/2))\n topleft_y = int(center_y-(h/2))\n\n boxes.append([topleft_x,topleft_y,w,h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n #DISPLAY DETECTION\n total_detections = len(boxes)\n for i in range(total_detections):\n if i in indexes:\n topleft_x, topleft_y, w,h = boxes[i]\n label = detection_classes[class_ids[i]]\n cv2.rectangle(raw_image, (topleft_x,topleft_y), (topleft_x+w,topleft_y+h), (0,100,255), 1)\n cv2.putText(raw_image, label, (topleft_x, topleft_y),cv2.FONT_HERSHEY_COMPLEX,1,(0,165,255))\n\n\n return raw_image", "def image_cb(self, msg):\n self.has_image = True\n self.camera_image = msg", "def imageCallback(self, image):\n\n ############################################################################################\n # Begin image processing code (You write this!)\n\n feature_vector = [] # TODO: Fill this in with the features you extracted from the image\n\n # End image processing code\n 
############################################################################################\n\n classification = self.classify_client(feature_vector)\n print('Classified image as: ' + str(classification.result))", "def webcam_submit():\n\n # Base64 string of image.\n pic_64 = request.form['file'].partition('base64,')[2]\n\n # Convert base64 string to bytes object.\n pic = base64.b64decode(pic_64)\n\n # Save bytes object to storage and predict.\n destination_filename = save_photo(pic)\n destination = os.path.join('app/static/img/tmp', destination_filename)\n pred_class, pred_idx, outputs = classify_photo(destination=destination)\n\n # If probability of classifying the image is less than 92%, ask user to\n # resubmit a different picture.\n if max(outputs) < 0.92:\n print(f\"{pred_class}: {max(outputs)}\")\n flash(\n \"We are unsure about What Those R. Please try another image.\",\n \"form-warning\"\n )\n return jsonify({\"redirect\": url_for('index')})\n\n else:\n return jsonify({\"results\":\n url_for('results',\n pred_class=str(pred_class).replace('_',\n ' '),\n pred_prob=round(max(outputs).item() * 100,\n 4),\n img=os.path.join(\n 'img/tmp',\n destination_filename)\n )\n })", "def get_response(image):\n encoded = base64.b64encode(image.read())\n GOOGLE_CLOUD_VISION_API_URL = 'https://vision.googleapis.com/v1/images:annotate?key='\n API_KEY = 'AIzaSyCKFsYnfYoLFeD2OHpvcjky9opfhHKFnP0'\n api_url = GOOGLE_CLOUD_VISION_API_URL + API_KEY\n header = {'Content-Type': 'application/json'}\n body = json.dumps({\n\t\t\t'requests': [{\n\t\t\t\t'image': {\n\t\t\t\t\t'content': encoded.decode(\"utf-8\"),\n\t\t\t\t},\n\t\t\t\t'features': [{\n\t\t\t\t\t'type': 'DOCUMENT_TEXT_DETECTION',\n\t\t\t\t}]\n\t\t\t}]\n\t\t})\n d = requests.post(api_url,data=body).json()\n return d", "def visualize_detection(self, image):\n\t\tH, W, _ = image.shape\n\t\tpos_list = self.apply_detection(image)\n\t\tdetections = {}\n\t\thasDetection = False\n\t\tfor i, L in enumerate(pos_list):\n\t\t\ttext, coordinates = L[0], L[1]\n\t\t\tCOLOR = COLORS[text]\n\t\t\tfor x, y, w, h in coordinates:\n\t\t\t\t# prune bad homography points\n\t\t\t\tif x < 0 or y < 0 or x + w > W or \\\n\t\t\t\t y + h > H or w <= 1 or h <= 1:\n\t\t\t\t\tcontinue\n\t\t\t\t# add the detection to the dict for tracking\n\t\t\t\tdetections[self.num_detect] = (x, y, w, h)\n\t\t\t\tself.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text)\n\t\t\t\tself.num_detect += 1\n\t\t\t\thasDetection = True\n\t\t\t\t# if the detection is human\n\t\t\t\tif text == 'face':\n\t\t\t\t\tgender = self.genderDetect.classify(image[y:y+h, x:x+w, :])\n\t\t\t\t\tgender = 'female' if gender[0] < 0.5 else 'male'\n\t\t\t\t\tcv2.putText(image, gender, (x + w // 2 -10, y + h + 15),\n\t\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)\n\n\t\t\t\timage = cv2.rectangle(image, (x, y), (x + w, y + h), COLOR, 2)\n\t\t\t\tcv2.putText(image, text, (x, y - 5),\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)\n\t\tif hasDetection:\n\t\t\tself.detection_frames[self.num_save] = detections\n\t\tself.num_save +=1\n\t\treturn image", "def image_cb(self, msg):\n\n # Save the camera image\n self.camera_image = msg\n\n # I sufficient information is available...\n if not None in (self.camera_image, self.waypoint_tree, self.lights):\n\n # Find index and color state of next light\n light_wp, state = self.process_traffic_lights()\n\n # If the light is green...\n if state == TrafficLight.GREEN:\n\n # Publish sentinel indicatig no red light\n 
self.upcoming_red_light_pub.publish(Int32(-1))\n\n else:\n\n # Publish the traffic light index\n self.upcoming_red_light_pub.publish(Int32(light_wp))", "def get_data(self):\n global CAM\n while CAM.isOpened():\n _, frame = CAM.read()\n _, jpeg = cv2.imencode('.jpg', frame)\n encoded_img = \"data:image/jpg;base64,\" + str(base64.b64encode(jpeg.tobytes()).decode())\n SIO.emit('video_frame',\n {'frame': encoded_img},\n namespace='/live-stream')\n sleep(self.delay)", "def im_detect(net, target_data,im_data, im_info, features_given=True):\n\n cls_prob, rois = net(target_data, im_data, im_info,\n features_given=features_given)\n scores = cls_prob.data.cpu().numpy()[0,:,:]\n zs = np.zeros((scores.size, 1))\n scores = np.concatenate((zs,scores),1)\n boxes = rois.data.cpu().numpy()[0,:, :]\n\n return scores, boxes", "def on_image(self, image):", "def send_processd_for_prediction(self):\n resized_image = cv2.resize(self.processed_image, (28, 28))\n self.send_proccesd_image_to_ML.emit(resized_image)", "def _image_callback(self, msg):\n\n try:\n cv_image = self.bridge.imgmsg_to_cv2(msg, \"bgr8\")\n dil_size = self._sliderDil.value()\n eros_size = self._sliderEros.value()\n\t self.cv_image = self._image_widget.calc_bbox(cv_image, dil_size, eros_size)\n self.image = self._image_widget.set_image(cv_image)\n\n if self.save:\n\t\tif self.counter == 5:\n \t self.numImg += 1\n \t self._imgNum_label.setText(str(self.numImg))\n \t self.store_image(self._image_widget.get_image(), self._image_widget.get_bbox(), self.cls_id, self._image_widget.get_mask())\n\t\t self.counter = 0\n\t \telse:\n\t\t self.counter += 1\n except CvBridgeError as e:\n rospy.logerr(e)", "def send(socket, img_array):\n packed_image = msgpack.packb(img_array, default=mn.encode)\n socket.send(packed_image)", "def main_picamera():\n #takephoto() # First take a picture\n\n credentials = GoogleCredentials.get_application_default()\n service = discovery.build('vision', 'v1', credentials=credentials)\n\n with open('image.jpg', 'rb') as image:\n # image_content = base64.b64encode(image.read())\n image_content = image.read()\n service_request = service.images().annotate(body={\n 'requests': [{\n 'image': {\n 'content': image_content.decode('UTF-8')\n },\n 'features': [{\n 'type': 'LOGO_DETECTION',\n 'maxResults': 1\n }]\n }]\n })\n response = service_request.execute()\n\n try:\n label = response['responses'][0]['logoAnnotations'][0]['description']\n except:\n label = \"No response.\"\n\n print(label)", "def image_cb(self, msg):\n self.has_image = True\n self.camera_image_msg = msg", "def test_score_image(self):\n image = Image()\n response = self.client.open(\n '/v2/yolo',\n method='POST',\n data=json.dumps(image),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def send_image(image: PIL.Image.Image):\n import base64\n import io\n\n image = image.convert(\"RGB\")\n buffer = io.BytesIO()\n image.save(buffer, format=\"PNG\")\n image_b64 = base64.b64encode(buffer.getvalue())\n send(\"image\", image_b64.decode(\"utf-8\"))", "def do_stuff(self, net, meta):\n cv2_img = self.img_to_cv2(self.last_img)\n # Now we can use cv2 functions as the image is <type 'numpy.ndarray'>\n # rospy.loginfo(\"cv2_img: \" + str(type(cv2_img)))\n # Your OpenCV stuff\n # cv2_img = cv2.resize(cv2_img, (0,0), fx=0.25, fy=0.25) \n\n (rows,cols,channels) = cv2_img.shape\n # if cols > 60 and rows > 60 :\n # cv2.circle(cv2_img, (50,50), 10, 255)\n \n global x_old\n global no_meas_counter\n global est\n global 
cor\n global w\n global h\n \n\n r = darknet.detect(net, meta, cv2_img)\n # print(r)\n\n if not r:\n no_meas_counter += 1\n\n for i in r:\n if i[0].decode() == \"person\":\n x, y, w, h = i[2][0], i[2][1], i[2][2], i[2][3]\n xmin, ymin, xmax, ymax = darknet.convertBack(float(x), float(y), float(w), float(h))\n pt1 = (xmin, ymin)\n pt2 = (xmax, ymax)\n cv2.rectangle(cv2_img, pt1, pt2, (0, 255, 0), 2)\n cv2.putText(cv2_img, i[0].decode() + \" [\" + str(round(i[1] * 100, 2)) + \"]\", (pt1[0], pt1[1] + 20), cv2.FONT_HERSHEY_SIMPLEX, 1, [0, 255, 0], 4)\n \n global mp\n mp = np.array([[np.float32(x)],[np.float32(y)]])\n cor = kalman.correct(mp)\n no_meas_counter = 0\n\t\t\n\n else:\n no_meas_counter += 1\n \n # x_old = x\n\n # cv2.imshow(\"cv2_img\", cv2_img)\n # k = cv2.waitKey(1)\n # if k == 27:\n # cv2.destroyAllWindows()\n # exit()\n\n if no_meas_counter < 30:\n est = kalman.predict()\n msg = PolygonStamped()\n msg.header.stamp = rospy.Time.now()\n # msg.polygon.points = [Point32(x=x, y=y), Point32(x=cols, y=rows), Point32(x=w, y=h)]\n msg.polygon.points = [Point32(x=est[0], y=est[1]), Point32(x=cols, y=rows), Point32(x=w, y=h)] \n self.pub_yolo_detection.publish(msg)\n\n # cv2.imshow(\"Image window\", cv2_img)\n # cv2.waitKey(3)\n\n self.pub_images(cv2_img)\n self.is_new_img = False", "def predict():\r\n \r\n data = {\"success\": False}\r\n if flask.request.files.get(\"image\"):\r\n # read image from request\r\n image = flask.request.files[\"image\"].read()\r\n # convert image to BGR\r\n image = read_image_bgr(io.BytesIO(image))\r\n # preprocess image for model\r\n image = preprocess_image(image, mode='pass')\r\n image, scale = resize_image(image)\r\n data[\"scale\"] = scale\r\n\r\n # process image\r\n with graph.as_default():\r\n start_time = time.time()\r\n # generate prediction bounding boxes, scores, and labels on the input image\r\n boxes, scores, labels = model.predict(np.expand_dims(image, axis=0))\r\n # add inference time to data dictionary\r\n data[\"time\"] = time.time() - start_time\r\n\r\n # add prediction boxes, scores, & labels to data dictionary\r\n data[\"predictions\"] = {\"boxes\": boxes.tolist(),\r\n \"scores\": scores.tolist(),\r\n \"labels\": labels.tolist()}\r\n\r\n # prediction was successful\r\n data[\"success\"] = True\r\n \r\n # return the data dictionary as a JSON response\r\n return flask.jsonify(data)", "def send_image_frame(self, text, image):\n\n hub_reply = self.sender.send_image(text, image)\n return hub_reply", "def get_response_from_cv_api(data):\n url = 'https://vision.googleapis.com/v1/images:annotate?key={}'.format(API_KEY)\n\n response = requests.post(url=url, data=data, headers={'Content-Type': 'application/json'})\n\n return response", "def predict(self, img):\n logger.info(\"predict() for %s\" %threading.current_thread())\n\n #detect face from the image\n face, rect = self.detect_face(img)\n\n if face is None or rect is None:\n #print(\"No face found for img \", type(img))\n return None, None, None, None\n\n if self.redis_server_password is None:\n # No training data available. Just perform detection and return\n # an error message in the subject value.\n warning = \"Training data not available. 
Redis password not set.\"\n subject = \"No Training Password\" # This will be displayed with the face\n confidence = 0\n logger.warning(\"%s\" %warning)\n return None, subject, confidence, rect\n\n #predict the image using our face recognizer\n label, confidence = self.face_recognizer.predict(face)\n #get name of respective label returned by face recognizer\n label_text = self.face_recognizer.getLabelInfo(label)\n logger.info(\"label=%s label_text=%s\" %(label, label_text))\n\n # print(label_text, confidence, rect)\n return img, label_text, confidence, rect", "def detect(self, detect_img):\n features = self.classifier.detectMultiScale(detect_img,1.3,5)\n self.features = features\n self.features_detected = True", "def run(self):\n if self.stream:\n while True:\n try:\n ret, frame = self.stream.read()\n if ret is True:\n # TODO: replace by a real function that send frame to detection model\n self.detection_model.send_image(image=frame)\n if self.show_in_window:\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n except KeyboardInterrupt:\n self.stream.release()\n cv2.destroyAllWindows()\n self.log.close()\n return None\n except Exception as e:\n self.stream.release()\n cv2.destroyAllWindows()\n self.log.write('Error:Unexpected Error happened:\\n {}'.format(e))\n self.log.close()\n return None\n else:\n self.log.write(\"Error initializing stream....\\n\")\n self.log.close()\n return None", "def loop(self):\n sio = io.StringIO()\n\n if True:\n img = thread1.getImage()\n\n t = clock()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n #fgmask = fgbg.apply(frame)\n #hist = cv2.calcHist([fgmask],[0],None,[256],[0,256])\n gray = cv2.equalizeHist(gray)\n\n rects = detect(gray, cascade)\n\n if len(rects) == 0:\n #print \"List is empty\"\n # detect people in the image\n (rects, weights) = hog.detectMultiScale(gray, winStride=(4, 4),\n padding=(8, 8), scale=1.05)\n rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])\n rects = non_max_suppression(rects, probs=None, overlapThresh=0.65)\n \n if detector.hasMotion():\n if len(rects) == 0:\n GPIO.output(18, GPIO.LOW)\n else:\n GPIO.output(18, GPIO.HIGH)\n \n\n draw_rects(img, rects, (0, 255, 0))\n #if not self.nested.empty():\n # for x1, y1, x2, y2 in rects:\n # roi = gray[y1:y2, x1:x2]\n # vis_roi = vis[y1:y2, x1:x2]\n # subrects = detect(roi.copy(), self.nested)\n # draw_rects(vis_roi, subrects, (255, 0, 0))\n dt = clock() - t\n\n draw_str(img, (20, 20), 'time: %.1f ms' % (dt*1000))\n #draw_str(fgmask, (20, 20), 'white count: %02d' % hist[255])\n\n\n img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n #img = Image.fromarray(fgmask, mode='L')\n img.save(sio, \"JPEG\")\n\n try:\n self.write_message(base64.b64encode(sio.getvalue()))\n except tornado.websocket.WebSocketClosedError:\n self.camera_loop.stop()", "def send_image(self, img_path) -> object:\n method = 'sendPhoto?' 
+ 'chat_id=' + str(self.__chat_id_response())\n if img_path[-4:] not in ['.jpg', '.png']:\n print('Invalid File Format, please use .jpg or .png format')\n sys.exit(1)\n try:\n files = {'photo': open(img_path, 'rb')}\n return requests.post(self.api_url + method, files = files)\n except FileNotFoundError as fl_err:\n print(fl_err)\n sys.exit(1)", "def predict(self, img_path):\n\n img = cv2.imread(img_path)\n img0 = img.copy()\n \n #This happens inside datasets\n # Convert\n img = letterbox(img, new_shape=self.img_size)[0]\n\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n \n #this happens on detect\n img = torch.from_numpy(img).to(self.device)\n img = img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n pred = self.model(img)[0]\n\n # Apply NMS\n pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=self.agnostic_nms)\n \n # Process detections\n for i, det in enumerate(pred): # detections per image\n if det is not None and len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()\n\n pred = [d.cpu().detach().numpy() for d in pred if d is not None]\n pred = pred[0] if len(pred) else pred\n \n pred = [[[x1, y1, x2, y2],conf] for x1, y1, x2, y2, conf, clss in pred]\n\n return pred", "def extract_detections(self):\n self.rescue_model.setInput(self.human_blob)\n self.predictions = self.rescue_model.forward()", "def send_img(msg, user_name):\n msg['Text'](msg['FileName'])\n itchat.send_image(msg['FileName'], user_name)", "def detect_fn(image):\n\n # image= tf.convert_to_tensor(image, dtype=tf.float32)\n\n image, shapes = model.preprocess(image)\n prediction_dict = model.predict(image, shapes)\n detections = model.postprocess(prediction_dict, shapes)\n\n return detections, prediction_dict, tf.reshape(shapes, [-1])", "def request():\n return face_client.face.detect_with_stream(image=open(\"frame.png\", 'rb'),\n return_face_attributes=[emotion_attribute],\n recognition_model='recognition_02')", "def vis_detections(im, class_name, dets, thresh=0.8):\n global num\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n frame = im\n im = im[:, :, (2, 1, 0)]\n #fig, ax = plt.subplots(figsize=(12, 12))\n #ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 255), 2)\n print(num)\n cv2.imwrite('./'+str(num)+\".jpg\", frame)", "def send_test_data(self, images, state_values, send_q):\n for text_and_image in images:\n send_q.append(text_and_image)\n font = cv2.FONT_HERSHEY_SIMPLEX\n for text_and_value in state_values:\n text, value = text_and_value\n state_image = np.zeros((50, 200), dtype=\"uint8\") # blank image\n cv2.putText(state_image, value, (10, 35), font,\n 1, (255, 255, 255), 2, cv2.LINE_AA)\n text_and_image = (text, state_image)\n send_q.append(text_and_image)", "def send(self, image):\n packed_data = self._pack_data(image)\n for chip in range(self.num_chips):\n for lmt in range(self.num_lmts):\n for packet in range(self.num_packets):\n self.channels[chip][lmt].write(self.channels[chip][lmt].numElements, packed_data[chip][lmt][packet])", "def image_cb(self, msg):\n self.has_image = True\n self.camera_image = msg\n self.get_light_state()", "def on_image_update(self, message_data):\n # Get the image\n try:\n # 
The image should be already encoded as rgb8, we pass through to avoid costly recomputing\n image_array = self.bridge.compressed_imgmsg_to_cv2(message_data, desired_encoding=\"passthrough\")\n image_array = cv2.rotate(image_array, cv2.ROTATE_90_CLOCKWISE)\n image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)\n image_array_gray = cv2.cvtColor(image_array, cv2.COLOR_RGB2GRAY)\n corners, ids, rejectedImgPoints = aruco.detectMarkers(image_array_gray, self.aruco_dict, parameters=self.aruco_parameters)\n self.corners = corners\n # For some reason the cv2 transformation rotates the image, haven't figured out why yet\n self.last_image = aruco.drawDetectedMarkers(image_array, corners)\n except CvBridgeError as err:\n print err\n\n # Calculate the frame rate\n self.image_counter += 1\n now = time.time()\n frame_duration = now - self.last_time\n framerate = 1./frame_duration\n # Calculate the average frame rate from the latest update\n self.average_framerate = self.average_framerate + float(framerate - self.average_framerate)/(self.image_counter + 1)\n # End of this frame\n self.last_time = now", "def recognize_image(self, bot, update):\n # get image\n chat_id = update.message.chat_id\n new_image = bot.get_file(update.message.photo[-1].file_id)\n new_image.download('recon.jpg')\n\n group_of_items = [i for i in range(len(docvectors))]\n num_to_select = 3\n list_of_random_items = random.sample(group_of_items, num_to_select)\n jokes = '🤔\\nIt would take a while to recognize objects on the image, Morty. Your grandfather will tell some jokes while you wait :)\\n\\n'\n for joke in list_of_random_items:\n jokes += '- ' + docvectors[joke][1] + \"\\n\\n\"\n bot.sendMessage(chat_id=update.message.chat_id, text=jokes)\n\n try:\n entities = self.detector.recon()\n if len(entities) == 1:\n k = \"Are you blind, Morty? There's \"\n else:\n k = \"Are you blind, Morty? There're \"\n\n d = {}\n for entity in entities:\n d[entity] = d.get(entity, 0) + 1\n\n blacklist = ''\n if len(entities) > 0:\n blacklist = entities[-1]\n\n for i in d:\n if i != blacklist:\n k += detector.NLG(i, d[i], False, False)\n\n if len(d) > 0:\n k += detector.NLG(blacklist, d[blacklist], True, False if len(d) == 1 else True)\n k += ' on the picture'\n else:\n k = \"I can't recognize anything, Morty. Get lost!\"\n except:\n message = \"You broke it again, Morty!\"\n bot.send_message(chat_id=update.message.chat_id, text=message)\n return -1\n # return an image with a caption\n bot.send_photo(chat_id=chat_id, photo=open('recon.jpg', 'rb'), caption=k[:200])\n return -1", "def face_detection_image(self, image_path):\n body = {'file': (image_path, open(image_path, 'rb'), \"multipart/form-data\")}\n url = self.base_url + '/vision-service/phoenix-vision/face-detection/image'\n headers = {\"ApiKey\": self.api_key}\n response = requests.post(url=url, files=body, headers=headers).text\n return response" ]
[ "0.7168172", "0.6857813", "0.6741724", "0.65148985", "0.64819443", "0.64530796", "0.64069283", "0.6382538", "0.6379917", "0.633284", "0.6293329", "0.6292463", "0.6283481", "0.6194607", "0.6180911", "0.617084", "0.61642075", "0.61596763", "0.615218", "0.61351126", "0.6130977", "0.61192364", "0.6112583", "0.61004716", "0.60909986", "0.607433", "0.60375", "0.60375", "0.60308295", "0.60289204", "0.6023003", "0.601802", "0.59893423", "0.5982317", "0.5969707", "0.59684193", "0.59578925", "0.5943859", "0.5942717", "0.5931292", "0.5904541", "0.58971786", "0.58925706", "0.5891222", "0.5874991", "0.5857525", "0.58536637", "0.58380944", "0.58196807", "0.5809291", "0.58062154", "0.57944673", "0.5791611", "0.5780761", "0.5778236", "0.5777901", "0.577613", "0.5774887", "0.5774108", "0.5766059", "0.5765197", "0.57646483", "0.5764235", "0.57551587", "0.57545936", "0.5754115", "0.57520485", "0.57382846", "0.5737305", "0.5737196", "0.5730034", "0.5725016", "0.57188576", "0.57154804", "0.5703547", "0.570317", "0.5700144", "0.56950563", "0.56945974", "0.5689109", "0.5670861", "0.565197", "0.5650989", "0.56414145", "0.56402665", "0.5638494", "0.56381094", "0.5635082", "0.5630386", "0.5628047", "0.5627538", "0.5624074", "0.56229997", "0.56227237", "0.5620677", "0.56136227", "0.5613421", "0.56058145", "0.5604419", "0.56005794" ]
0.7531366
0
get channel presenter_server_ip, port, channel_name, content_type
def get_channel_config(config_file):
    config = configparser.ConfigParser()
    config.read(config_file)
    presenter_server_ip = config['baseconf']['presenter_server_ip']
    port = int(config['baseconf']['presenter_server_port'])
    channel_name = config['baseconf']['channel_name']
    content_type = int(config['baseconf']['content_type'])
    log_info("presenter server ip %s, port %d, channel name %s, "
             "type %d " % (presenter_server_ip, port, channel_name,
                           content_type))
    return presenter_server_ip, port, channel_name, content_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getChannel(self):\r\n return self.channel", "def get_channel(channel_id):\r\n if channel_id[0] == 'C':\r\n type = \"channel\"\r\n elif channel_id[0] == 'G':\r\n type = \"group\"\r\n elif channel_id[0] == 'D':\r\n return False\r\n else:\r\n return False\r\n data = slack_client.api_call(type + \"s.info\", channel=channel_id)\r\n if not data[\"ok\"]:\r\n return False\r\n response = {}\r\n response[\"name\"] = data[type][\"name\"]\r\n response[\"members\"] = data[type][\"members\"]\r\n response[\"channel_id\"] = data[type][\"id\"]\r\n return response", "def getChannelResponse(self):\n \n \n return self.channel_response", "def get_channel(self, channel_id):\n uri = 'channels/' + channel_id\n return self.make_request(uri)", "def extract_medialive_channel_info(ml_client, ml_channel_id):\n mediapackage_channel_list = []\n channel_name = None\n try:\n response = ml_client.describe_channel(\n ChannelId=ml_channel_id\n )\n channel_name = str(response[\"Name\"])\n destinations = response[\"Destinations\"]\n for destination in destinations:\n for output in destination[\"Settings\"]:\n url = str(output[\"Url\"])\n if \"mediapackage\" in url:\n mediapackage_channel_list.append(url)\n except Exception, e:\n print \"Error:\", e.message\n return channel_name, mediapackage_channel_list", "def get_channel(self, channel_name):\n try:\n cm = self.__core.get_service(\"channel_manager\")\n cdb = cm.channel_database_get()\n channel = cdb.channel_get(channel_name)\n return channel.get()\n except Exception:\n traceback.print_exc()", "def parse_channel(self, channel):\n return channel.split(\":\")[1:]", "def extract_channel_views(show_views_channel):\n channel,views,=show_views_channel[1]\n return (channel, views)", "def channel(self):\n return self._channel", "def channel(self):\n return self._channel", "def channel(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"channel\")", "def channelinfo(self):\n\n return ChannelInfo(\n self._filetextbox.text(),\n self._idtextbox.text(),\n self._datafilebox.text()\n )", "def channel_info(channel_id):\n\n if not settings.SLACK_TOKEN:\n return None\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.conversations_info(channel=channel_id)\n assert response['ok'] is True\n return response['channel']\n except SlackApiError as e:\n assert e.response['ok'] is False\n return None", "def get_livechat_channel_info(self):\n self.ensure_one()\n if self.channel_id:\n return self.channel_id.sudo().get_livechat_info()\n return {}", "def channel(self):\n raise NotImplementedError", "def read_channel(self, channel: int, /) -> int:", "def get_channels(self):\n return self.channels", "def get_channel(self):\n if self.channel is None or not self.channel.is_open:\n if not self.connection.is_open:\n self.connection = CONNECTION_MANAGER.get_connection(self.connection_name)\n self.channel = self.connection.channel()\n return self.channel", "def get_channels():\n r = slack.channels.list().body\n return [ c for c in r['channels'] if c['is_member'] ]", "def channels(message):\n load_users(message._client.users)\n for x in message._client.channels:\n chan = message._client.channels[x]\n if 'is_member' in chan:\n if chan['is_member']:\n message.reply(\"{} ({})\".format(chan['name'], chan['id']))\n# message.reply(pretty_json(chan, True))\n elif 'is_im' in chan:\n print(chan)\n friendlyname = chan['user']\n try:\n friendlyname = chan['user'].name\n except KeyError:\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n chan['id']))", "def 
channel(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"channel\")", "def channel(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"channel\")", "def channel_details(token, channel_id):\n authorised_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if authorised_u_id not in channel[\"all_members\"]:\n raise AccessError(\"The authorised user is not a member of the channel.\")\n name = channel[\"name\"]\n all_members = []\n owner_members = []\n for member_id in channel[\"all_members\"]:\n member = users.get(member_id)\n all_members.append(\n {\n \"u_id\": member[\"u_id\"],\n \"name_first\": member[\"first_name\"],\n \"name_last\": member[\"last_name\"],\n \"profile_img_url \": member[\"img_url\"],\n }\n )\n for owner_id in channel[\"owners\"]:\n owner = users.get(owner_id)\n owner_members.append(\n {\n \"u_id\": owner[\"u_id\"],\n \"name_first\": owner[\"first_name\"],\n \"name_last\": owner[\"last_name\"],\n \"profile_img_url \": owner[\"img_url\"],\n }\n )\n return {\"name\": name, \"all_members\": all_members, \"owner_members\": owner_members}", "def get_channels(cj): \n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n channels = opener.open(\"http://www.douban.com/j/app/radio/channels\")\n channel_list = json.loads(channels.read())\n return channel_list[\"channels\"]\n # print channel_list", "def channel(self) -> 'Channel': # stub\n return self._channel", "def get_channel_details(self, chan_ids_list, part='statistics'):\n\n chnl_details = {}\n key = self.keylist[self.keyindex]\n url_c = \"https://www.googleapis.com/youtube/v3/channels\"\n\n for ind, chan in enumerate(chan_ids_list):\n try:\n querystring = {\"id\": chan, \"part\": part,\n \"key\": key}\n response = request_handler(self, url_c, params=querystring, wait=100)\n #print(response)\n # Error-handling\n if response.get('error'):\n print(response.get('error'))\n while response['error']['errors'][0]:\n key = keychange(self)\n \n querystring = {\"id\": chan, \"part\": part,\n \"key\": key}\n response = request_handler(self, url_c, params=querystring, wait=100)\n\n if response.get('error'):\n #chnl_details.update({chan:[str(response), response.text]})\n #\n if response['error']['errors'][0]['reason'] == 'keyInvalid':\n return [{chan:[str(response), response.text]}]\n break\n\n if response.get('Interneterror'):\n chnl_details.update({chan: str(response)})\n continue\n\n chnl_details[chan] = response['items']\n\n except Exception as e:\n print(e, traceback.format_exc())\n\n if ind % 100 == 0:\n print(ind)\n \n return chnl_details", "def get(self, channel):\n try:\n return self[channel.lower()]\n except KeyError:\n return None", "def channels(message):\n for channel in message._client.channels:\n if 'is_member' in channel:\n message.reply(\"{} ({})\".format(channel['name'], channel['id']))\n elif 'is_im' in channel:\n #print(channel)\n friendlyname = channel['user']\n try:\n friendlyname = channel['user'][\"name\"]\n except (KeyError, AttributeError):\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n channel['id']))", "def test_api_read_channel(api):\n response = api.read_channel()\n assert \"name='request().json()'\" in repr(response)\n req_call = requests.request\n assert req_call.call_count == 1\n req_args = req_call.call_args[0]\n req_kw = req_call.call_args[1]\n assert req_args[0] == 'GET'\n assert req_args[1] == 'https://news-api.apple.com/channels/FAKE_CHANNEL'\n 
assert 'Authorization' in req_kw['headers']\n assert 'HHMAC; key=FAKE_ID; signature=' in req_kw['headers']['Authorization']\n assert req_kw['data'] is None", "def get_channels():\n\tchannels = slack.get_channels()\n\treturn jsonify(channels=channels.body['channels'])", "def get_group_info(self, data):\n return self.__form_call('channels.info', data)", "def return_type_channel(self):\n return {\n 'channel_id': self.channel_id,\n 'name': self.name\n }", "def getChannel(self, channel):\n channel = channel.lower()\n if channel in self.channels:\n return self.channels[channel]\n else:\n c = IrcChannel()\n self.channels[channel] = c\n return c", "def default_channel_response_data(channel):\n channel_record = Channel.objects.get(name=channel.name)\n return {\n \"title\": channel.title,\n \"name\": channel.name,\n \"description\": channel.description,\n \"public_description\": channel.public_description,\n \"channel_type\": channel.channel_type,\n \"user_is_contributor\": True,\n \"user_is_subscriber\": True,\n \"user_is_moderator\": False,\n \"link_type\": channel.link_type,\n \"membership_is_managed\": False,\n \"avatar\": None,\n \"avatar_small\": None,\n \"avatar_medium\": None,\n \"banner\": None,\n \"ga_tracking_id\": None,\n \"allowed_post_types\": [\n post_type\n for post_type, enabled in channel_record.allowed_post_types\n if enabled\n ],\n \"widget_list_id\": channel_record.widget_list_id,\n \"about\": None,\n \"moderator_notifications\": False,\n }", "async def get_channel(self, channel_id):\n channel = await self.client.fetch_channel(channel_id)\n return channel", "def channel(self):\n return Channel({'id': self.channel_id, 'connection': self.connection})", "def GetChannels(self, type='tv', group=2):\n self.logger.debug(\"Loading XBMC PVC channel list.\")\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n return xbmc.PVR.GetChannels(channelgroupid=int(group), properties=['thumbnail'])\n except:\n return", "def channel_name(self) -> str:\n return self._channel_name", "def channel(self, row: Dict[str, str]) -> Union[int, str]:\n\n return row['Channel']", "def extract_channels(self, index: int) -> ListLike:\n cmd_pieces = self[index].split()\n channels = []\n for i, piece in enumerate(cmd_pieces):\n if piece in [\"--channel\", \"-c\"]:\n channels.append(cmd_pieces[i + 1])\n return channels", "def get_channels(self):\n response = self.client.api_call(\n f'conversations.list?types={cfg.CHANNEL[\"types\"]}&exclude_archived={cfg.CHANNEL[\"exclude_archived\"]}'\n )\n assert response['ok']\n return response['channels']", "def get_channel():\n try:\n page = int(request.args.get('page', 0))\n page_size = int(request.args.get('size', 10))\n search = request.args.get('search', '')\n except Exception as ex:\n return send_error(message='Parser params error')\n query = dict()\n if search and search != '':\n query['name'] = {\"$regex\": search}\n\n data = client.db.channel.find(query).skip(page_size * page).limit(page_size)\n totals = client.db.channel.count(query)\n data = list(data)\n for item in data:\n item['_id'] = str(item['_id'])\n strategy = client.db.strategy.find_one({'_id': ObjectId(item['strategy'])})\n if strategy is not None:\n strategy['_id'] = str(strategy['_id'])\n item['strategy'] = strategy\n\n return_data = dict(\n rows=data,\n totals=totals\n )\n return send_result(data=return_data)", "def api_get_channel_name(channel_id, api_service):\n request = api_service.channels().list(id=channel_id, part='snippet')\n success = False\n\n response = None\n\n while not success:\n\n try:\n 
response = request.execute()\n success = True\n\n except ConnectionResetError:\n print(\"ConnectionResetError: let me sleep for 5 seconds, just enough time to recover...\")\n sleep(5)\n\n name = response['items'][0]['snippet']['title']\n\n return name", "def _channels_list(self):\n result = self.slack.api_call(\"channels.list\")\n\n if not result.get(\"ok\"):\n logging.error(result['error'])\n return None\n\n return result['channels']", "def showChannels(self):\n print(\"Channels:\")\n for c in self.channels:\n if c.role != channel_pb2.Channel.Role.DISABLED:\n cStr = stripnl(MessageToJson(c.settings))\n print(\n f\" {channel_pb2.Channel.Role.Name(c.role)} psk={pskToString(c.settings.psk)} {cStr}\")\n publicURL = self.getURL(includeAll=False)\n adminURL = self.getURL(includeAll=True)\n print(f\"\\nPrimary channel URL: {publicURL}\")\n if adminURL != publicURL:\n print(f\"Complete URL (includes all channels): {adminURL}\")", "def cvp(self, id):\n\n server = Server.query.filter_by(cvp_uuid=id).first_or_404()\n server_details = murmur.get_server(server.mumble_host, server.mumble_instance)\n\n if server_details is not None:\n root_channel = server_details['parent_channel']\n sub_channels = server_details['sub_channels']\n\n # Iterate through channels to transform json response to cvp specification\n for i in sub_channels:\n i['description'] = i['c']['description']\n i['id'] = i['c']['id']\n i['links'] = i['c']['links']\n i['name'] = i['c']['name']\n i['parent'] = i['c']['parent']\n i['position'] = i['c']['position']\n i['temporary'] = i['c']['temporary']\n i['channels'] = i.pop('children')\n i['x_connecturl'] = \"mumble://%s:%i\" % (server.mumble_host, server_details['port'])\n\n i.pop(\"c\", None)\n # Iterate through channels' sub-channels to transform json response to cvp specification\n for j in i['channels']:\n j['description'] = j['c']['description']\n j['id'] = j['c']['id']\n j['links'] = j['c']['links']\n j['name'] = j['c']['name']\n j['parent'] = j['c']['parent']\n j['position'] = j['c']['position']\n j['temporary'] = j['c']['temporary']\n j['x_connecturl'] = \"mumble://%s:%i\" % (server.mumble_host, server_details['port'])\n j.pop(\"c\", None)\n j['channels'] = j.pop('children')\n\n # More reforming of json data to CVP spec.\n root_channel['channels'] = sub_channels\n root_channel['users'] = server_details['users']\n\n # Prepare json response context\n cvp = {\n 'root': root_channel,\n 'id': server_details['id'],\n 'name': server_details['name'],\n \"x_connecturl\": \"mumble://%s:%i\" % (server.mumble_host, server_details['port']),\n 'x_uptime': server_details['uptime']\n }\n return Response(json.dumps(cvp, sort_keys=True, indent=4), mimetype='application/json')\n\n else:\n return jsonify({'code': 404, 'message': 'Not Found'})", "def get_channel_ID(self):\n return self._CHANNEL_ID", "def __parse_channel_id(self, data):\n if 'channel_id' in data:\n return data['channel_id']\n if 'channel' in data:\n return data['channel']['id']\n return None", "def get_channel(self, c):\n channel = self.binding.get_switcher_channel()\n return int(channel);", "def channel_details(token, channel_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # check if user is a member of channel with channel_ID and return AccessError if not\n if is_user_channel_member(channel_id, curr_id) is False:\n raise error.AccessError(description=\"\"\"You must be a member of the channel to view its\n details.\"\"\")\n\n # now we return the name, owners and members of 
the channel\n details = {\"name\": \"\", \"owner_members\": [], \"all_members\": []}\n # for owner/all_members we need a list of dictionaries containing u_id, first name and last name\n # {\"u_id\": [], \"name_first\": \"\", \"name_last\": \"\"}\n\n channel = database.get_channel(channel_id)\n members = database.get_channel_data(channel_id)\n\n details[\"name\"] = channel[\"name\"]\n for user_id in members[\"owner_ids\"]:\n\n owner_id = user_id\n user_data = database.get_user_data(owner_id)\n name_first = user_data[\"name_first\"]\n name_last = user_data[\"name_last\"]\n profile_img_url = user_data['profile_img_url']\n details[\"owner_members\"].append({\"u_id\": owner_id, \"name_first\": name_first,\n \"name_last\": name_last,\n \"profile_img_url\": profile_img_url})\n\n for user_id in members[\"member_ids\"]:\n member_id = user_id\n user_data = database.get_user_data(member_id)\n name_first = user_data[\"name_first\"]\n name_last = user_data[\"name_last\"]\n profile_img_url = user_data['profile_img_url']\n details[\"all_members\"].append({\"u_id\": member_id, \"name_first\": name_first,\n \"name_last\": name_last,\n \"profile_img_url\": profile_img_url})\n\n return details", "def getRemoteHost():", "def get_channels_json(self):\n logging.debug(f\"Getting all Slack channels...\")\n return self.get_list_json(\"conversations\")[\"channels\"]", "def get_video_channel_name(self, response):\n return response.css(\"div.yt-user-info\")\\\n .extract_first(default='')", "def open_channel(config_file):\n server_ip, port, channel_name, content_type = get_channel_config(config_file)\n channel = PresenterChannel(server_ip, port, channel_name, content_type)\n ret = channel.startup()\n if ret:\n log_error(\"ERROR:Open channel failed\")\n return None\n return channel", "def read_channel(self, ch):\n ...", "def channel_list(self):\n return_str = self.scpi.query_channel_catalog().split(',')\n channel_dct = {}\n for i in range(int(len(return_str)/2)):\n channel_dct[int(return_str[2 * i])] = return_str[2 * i + 1]\n return channel_dct", "def extractOneChannel( self, nNumChannel = 0 ):\n if( nNumChannel >= self.nNbrChannel ):\n logging.error( \"You ask for a non existing channel, returning empty data list (nbr channel: %d, asked: %d)\" %(self.nNbrChannel,nNumChannel) )\n return []\n return self.data[nNumChannel::self.nNbrChannel]", "def my_channels(_response=Response, _db=Depends(get_db), Authorization=Header(None)):\n\n stat, auth_data = verification_details(Authorization)\n\n if stat != 200:\n _response.status_code = 500\n return {\"data\": \"something happened\"}\n\n res_status, _data = ChatController(_db).get_my_channels(\n auth_data[\"data\"][\"user\"][\"username\"]\n )\n\n _response.status_code = res_status\n\n return {\"data\": _data}", "def get_channel_data(self, url, proxies=None):\n html_rsp = self._get_url_wrapper(url, proxies=proxies)\n if not html_rsp:\n return False\n data_dict = self._extract_channel_data(html_rsp)\n data_dict['channel_url'] = url\n return data_dict", "def part(self, channel):\n raise NotImplementedError", "def gethostbyceleryname(name):\n consumer = None\n channel = None\n ip = None \n\n ret = robust_cmd(['sudo', 'rabbitmqctl', 'list_consumers', '-q'], max_attempts=5)\n if ret and 'output' in ret:\n for c in ret['output'].split('\\n'):\n if name in c:\n consumer=c.split('\\t')[1]\n break\n if consumer == None:\n return None\n\n ret = robust_cmd(['sudo', 'rabbitmqctl', 'list_channels', 'pid', 'connection', 'number', '-q'], max_attempts=5)\n if ret and 'output' in ret:\n for ch 
in ret['output'].split('\\n'):\n if consumer in ch:\n channel=ch.split('\\t')[1] \n break\n if channel == None:\n return None\n\n ret = robust_cmd(['sudo', 'rabbitmqctl', 'list_connections', 'pid', 'peer_host', '-q'], max_attempts=5)\n if ret and 'output' in ret:\n for con in ret['output'].split('\\n'):\n if channel in con:\n ip=con.split('\\t')[1]\n break\n if ip == None:\n return None\n\n return ip", "def channel(self) -> Channel:\n return self._channel", "def get(self, public_id):\n channel = get_channel_state(public_id)\n if not channel:\n api.abort(404)\n else:\n return channel", "def default_channel(self) -> int:\r\n ...", "def get_channels(self):\n bus_name = self.telepathy_conn.requested_bus_name\n connection_path = self.telepathy_conn.object_path\n channels = [self.telepathy_text_chan.object_path,\n self.telepathy_tubes_chan.object_path]\n\n print('%r: bus name is %s, connection is %s, channels are %r',\n self, bus_name, connection_path, channels)\n return bus_name, connection_path, channels", "def channels(self):\n return self._channels", "def channel(self) -> Optional[pulumi.Input['GatewayAPIConfigChannel']]:\n return pulumi.get(self, \"channel\")", "def get_channel(self, channel_id):\n try:\n return self[channel_id]\n except KeyError:\n raise ChannelNotFoundError(\"Channel {} not found\".format(channel_id))", "def getChannelInfo( self, channel ):\n d = self.dcDict\n for dev in d:\n for devChannel in d[dev]['devChannels']:\n if d[dev]['devChannels'][devChannel]['channel'] == channel: return ( dev, devChannel )", "def channel(self):\n if not hasattr(self, '_channel'):\n self._channel = self.new_channel()\n return self._channel", "async def get_vote_channel(self, guild: discord.Guild):\n\n vote_channels = [\n ch for ch in guild.channels\n if \"voting\" in ch.name\n or \"vote\" in ch.name\n ]\n\n if len(vote_channels) < 1:\n return _(\n \"I couldn't identify a voting channel.\"\n \" Please specify one explicitly.\"\n )\n\n if len(vote_channels) > 1:\n # get channel with the largest suffixed number\n return max(\n vote_channels, key=lambda obj: int(obj.name.split(\"-\")[1])\n )\n\n else:\n return vote_channels[0]", "def getChannelByName(self, name):\n for c in (self.channels or []):\n if c.settings and c.settings.name == name:\n return c\n return None", "def channel_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"channel_id\")", "def get_userdata(self, server, channame, nick, datakey):\n skey = server.lower()\n ckey = irc.strings.lower(channame)\n data = None\n if skey in self.serverchans and ckey in self.serverchans[skey]:\n data = self.serverchans[skey][ckey].get_userdata(nick, datakey)\n return data", "def vis_channel(model, layer, channel_n):\n print('Getting vis for ' + layer + ', channel ' + str(channel_n))\n l_name = dla_lucid.LAYERS[layer][0]\n obj = objectives.channel(l_name, channel_n)\n imgs = render.render_vis(model, obj, dla_lucid.PARAM_1D,\n thresholds=dla_lucid.THRESH_1D, transforms=dla_lucid.TFORMS_1D, verbose=False)\n imgs_array = np.array(imgs)\n imgs_reshaped = imgs_array.reshape(400)\n return imgs_reshaped", "def channel_id(self):\n return self._channel_id", "def get_channel_ID(self):\n return self.CHC.get_channel_ID()", "async def used_channels(request: web.Request) -> web.Response:\n\n session_factory = get_session_factory_from_request(request)\n\n with session_factory() as session:\n channels = await get_channels(session)\n\n response = web.json_response({\"channels\": channels})\n response.enable_compression()\n return response", "def 
get_channel_number(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\treturn int(self.keyinfo['channel_id'].attrs['channel_number'])\n\t\texcept:\n\t\t\tpass\n\n\t\ttry:\n\t\t\treturn int(self.keyinfo['read_id'].attrs['channel_number'])\n\t\texcept:\n\t\t\treturn None", "def channel(self) -> Optional[pulumi.Input['ReleaseChannelChannel']]:\n return pulumi.get(self, \"channel\")", "def channels(self):\r\n return v3.Channels(self)", "def get_channel_frame(self, channel=None, create=True):\n if channel is None:\n if self._cur_channel is None:\n channel = _k.SERVER_CHANNEL\n else:\n channel = self._cur_channel\n if channel in self._channel_frames:\n f = self._channel_frames[channel]\n elif create:\n f = ChannelFrame(self, channel)\n self._channel_frames[channel] = f\n self.refresh_chanlist()\n else:\n f = self.get_channel_frame(_k.SERVER_CHANNEL)\n if self._cur_channel is None:\n self._cur_channel = channel\n f.pack(side=Tix.LEFT, fill=Tix.BOTH, expand=True)\n return f", "def get_list_youtube_channels_check(self):\n return self.bot_data_file[\"youtube\"][\"channels\"]", "async def _list(self, ctx):\n config = await self.config.guild(ctx.guild).channels()\n data = [self.bot.get_channel(x).mention for x in config]\n if ctx.channel.id in config:\n destination = ctx.author\n else:\n destination = ctx\n if not data:\n return await destination.send(\"There are no channels.\")\n await destination.send(\", \".join(data))", "def getChannelsByName(self, unit, channels): \n\t\treturn self.selectChannelsByName(unit, channels, dontSelect = 1)", "def GetChannelGroups(self, type='tv'):\n self.logger.debug(\"Loading XBMC PVC channel list.\")\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n return xbmc.PVR.GetChannelGroups(channeltype=type)\n except ValueError:\n return", "def get_channels(self):\n return [self.afos, \"%s...\" % (self.afos[:3], )]", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")", "def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")" ]
[ "0.65134233", "0.643602", "0.6210229", "0.619519", "0.6009834", "0.5932588", "0.58862126", "0.58620787", "0.5847913", "0.5847913", "0.584146", "0.58371985", "0.58296996", "0.5799957", "0.57254803", "0.57109845", "0.57049286", "0.5669257", "0.5649315", "0.5622001", "0.5602927", "0.5602927", "0.56024", "0.560007", "0.55995256", "0.5590534", "0.55889165", "0.5565141", "0.55267626", "0.55061245", "0.549975", "0.54700184", "0.5462763", "0.5455765", "0.5450635", "0.54273814", "0.54050267", "0.54024243", "0.54006666", "0.5386068", "0.53811955", "0.53586304", "0.53516346", "0.5343166", "0.53388524", "0.5335826", "0.5333314", "0.53104424", "0.5308874", "0.53078854", "0.52918464", "0.52852815", "0.52850026", "0.5282948", "0.52820176", "0.52799803", "0.5274718", "0.52736586", "0.52699006", "0.52643365", "0.5262171", "0.52601945", "0.52444917", "0.5243071", "0.5236216", "0.52358097", "0.5227667", "0.5204991", "0.5202057", "0.5198733", "0.519208", "0.517899", "0.51735747", "0.5158945", "0.5146616", "0.5141592", "0.5120178", "0.51161987", "0.5115307", "0.5111952", "0.51119345", "0.5103933", "0.5101674", "0.510167", "0.5094682", "0.5094466", "0.50914305", "0.50900847", "0.50900847", "0.50900847", "0.50900847", "0.50900847", "0.50900847", "0.50900847", "0.50900847", "0.50900847", "0.50900847", "0.50900847", "0.50900847", "0.50900847" ]
0.74030155
0
according to config open channel
def open_channel(config_file):
    server_ip, port, channel_name, content_type = get_channel_config(config_file)
    channel = PresenterChannel(server_ip, port, channel_name, content_type)
    ret = channel.startup()
    if ret:
        log_error("ERROR:Open channel failed")
        return None
    return channel
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default_channel(self) -> int:\r\n ...", "def open_channel(self):\n # LOGGER.info('Creating a new channel')\n self._connection.channel(on_open_callback=self.on_channel_task_open)\n self._connection.channel(on_open_callback=self.on_channel_ctrl_open)", "def channel_open(self):\n self._chan = self._session.invoke_shell()", "async def channel(self, ctx):\n pass", "def single_channel():\n return True", "def channel(self):\n\n self._channel = self._connection.channel()\n print(\"Channel opened...\")", "def channelOpen(self, specificData):\n log.msg('opened forwarding channel %s to %s:%s' % (self.id, self.host, self.port))\n self._connectDone()", "def _open(self):\n if self.channel is None:\n self.channel = self.transport.open_session()\n\n return self.channel", "def read_channel(self, ch):\n ...", "def read_channel(self, channel: int, /) -> int:", "def open_channel(self):\n self.logger.info('Creating a new channel')\n self._connection.channel(on_open_callback=self.on_channel_open)", "def channel(self):\n raise NotImplementedError", "def open_channel(self):\n logger.info('Creating a new channel')\n self._connection.channel(on_open_callback=self.on_channel_open)", "def open_channel(self):\n self.logger.info('creating channel')\n self._connection.channel(on_open_callback=self.on_channel_opened)", "def on_channel_change(self, new_channel):\n pass", "def test_switch_channels(self):\n\t\t# not available yet, experimental\n\t\tpass", "def test_open_via_channel(testchannel, callit):\n\n channel = testchannel.channel() if callit else testchannel.channel\n\n with channel as t:\n assert t.state == ChannelState.open\n\n assert testchannel.state == ChannelState.closed", "def test_open_state(testchannel):\n\n with testchannel.open() as t:\n assert t.state == ChannelState.open\n\n assert testchannel.state == ChannelState.closed", "def on_channel_open(self, new_channel):\n\t\tself.channel = new_channel\n\t\tself.declare_queue(self.on_queue_declared)", "def on_connected(self, connection):\n\t\t# Open a channel\n\t\tconnection.channel(self.on_channel_open)", "def _on_channel_open(self, channel_id: str) -> None:\n self._send_alive(channel_id)", "def process_IN_OPEN(self, event):", "def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def openCircuit(srv):", "def on_connection_open(self, unused_connection):\n self.logger.info('Connection established!')\n self.open_channel()", "def channel_connected(self):\n self.update_status()", "def addchan(channel):", "def getChannel(self):\r\n return self.channel", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def onOpen(self):", "def open_channel(from_name, to_name):\n sc = SecureChannel(from_name, to_name)\n sc.network_factory = NetworkFactory(CONFIG_FILE)\n return sc", "def test_default(self):\n acquire_channel = AcquireChannel(123)\n\n self.assertEqual(acquire_channel.index, 123)\n self.assertEqual(acquire_channel.name, \"a123\")", "def on_connected(connection):\n # open a channel\n connection.channel(on_open_channel)", "def is_open(self, channel=None):\n return self.get_state(channel) == 2", "def is_channel(self):\n return True", "def open(self):", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False", "def 
verify_and_respond_open_channel_from_remote_and_send_config_req(self, psm=0x33):\n request = L2capCaptures.ConnectionRequest(psm)\n assertThat(self.control_channel).emits(request)\n\n sid = request.get().GetIdentifier()\n dcid = request.get().GetSourceCid()\n scid = dcid\n channel = CertL2capChannel(self._device, scid, dcid, self._acl.acl_stream, self._acl, self.control_channel)\n self.scid_to_channel[scid] = channel\n\n # Connection response and config request combo packet\n conn_rsp_and_config_req = RawBuilder([\n 0x03, sid, 0x08, 0x00, dcid, 0x00, dcid, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, sid + 1, 0x04, 0x00, dcid,\n 0x00, 0x00, 0x00\n ])\n self.control_channel.send(conn_rsp_and_config_req)\n\n return channel", "def on_connection_open(self, unused_connection):\n logger.info('Connection opened')\n self.add_on_connection_close_callback()\n self.open_channel()", "def on_connection_open(self, connection):\n self.logger.debug(\"Connection opened: %s\", connection)\n self.open_channel()", "def open_channel(self, timeout=120):\n if self.get_channel_count() < self.MAX_CHANNELS and self.is_connected():\n try:\n channel = self.get_client().invoke_shell()\n channel.settimeout(timeout)\n self.add_channel(channel)\n return channel\n except error as e:\n print(\"opening channel error\")\n self._error = e\n # return None", "def test_channel_definition(self):\n TopoObj('topo', data, channels=channels)", "def test_open_alreadyopen(testchannel, state):\n\n testchannel._state = state\n with pytest.raises(ChannelOpenError):\n testchannel.open()", "def on_connection_open(self, unused_conncetion):\n self.logger.info('connection opened, adding connection close callback')\n self._connection.add_on_close_callback(self.on_connection_closed)\n self.open_channel()", "async def managechannels(self, ctx:commands.Context):", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def connect():", "def on_connected(connection):\n # Open a channel\n connection.channel(on_channel_open)", "async def _msgvote_on(self, ctx):\n\n channel = ctx.message.channel\n if channel.id in self.settings[\"channels_enabled\"]:\n await self.bot.say(\"Msgvote mode is already on in this channel.\")\n else:\n self.settings[\"channels_enabled\"].append(channel.id)\n dataIO.save_json(self.settings_path, self.settings)\n await self.bot.say(\"Msgvote mode is now on in this channel.\")", "def open(self, irc, msg, args):\n status = urlopen(\"http://portal.shack:8088/status\").read()\n status = json.loads(status)\n \n if status['status'] == 'open':\n irc.reply(\"shack is open.\", prefixNick=False)\n elif status['status'] == 'closed':\n irc.reply(\"shack is closed.\", prefixNick=False)\n else:\n irc.reply(random.choice(self.dunno), prefixNick=False)", "def handle_config_change(self, msg):\n self.xmpp.event('groupchat_config_status', msg)\n self.xmpp.event('muc::%s::config_status' % msg['from'].bare , msg)", "async def defchannel(self, ctx, channel: str):\n self.data_check(ctx)\n server = ctx.message.server\n\n self.riceCog2[server.id][\"defchannel\"] = channel\n dataIO.save_json(self.warning_settings,\n self.riceCog2)\n await self.bot.say(\"Log channel is now: **{}**\".format(channel))", "def selectChannel(self,asic,chan, hsmode= 1 ):\n pass", "def signedOn(self):\n # create a session to respond to private messages from nicks\n # not in any channel I'm in\n\n self.ircNetwork = u'TODO' # TODO \n\n self.defaultSession = 
self.store.find(d20session.D20Session,\n d20session.D20Session.name == u'#@@default@@').one()\n self.defaultSession.isDefaultSession = True\n # join my default channel\n self.join(self.factory.channel)", "def open(self):\r\n pass", "def open(self):\r\n pass", "async def _list(self, ctx):\n config = await self.config.guild(ctx.guild).channels()\n data = [self.bot.get_channel(x).mention for x in config]\n if ctx.channel.id in config:\n destination = ctx.author\n else:\n destination = ctx\n if not data:\n return await destination.send(\"There are no channels.\")\n await destination.send(\", \".join(data))", "def on_channel_open(self, channel):\n self.logger.info('Channel opened')\n self._channel = channel\n self.add_on_channel_close_callback()", "def change_channel():\n global interface\n\n print(\"Change channels for interface {}\".format(interface))\n channel_number = 1\n\n while True:\n system(f\"iwconfig {interface} channel {channel_number}\")\n channel_number = channel_number % 14 + 1\n sleep(0.5)", "def configure(self):\n\t\tself.outChannel = CAClient(self.pvstring + \".AOUT\")\n\t\tself.outChannel.configure()\n\t\tself.inChannel = CAClient(self.pvstring + \".TINP\")\n\t\tself.inChannel.configure()", "def on_channel_open(self, channel):\n logger.info('Channel opened')\n self._channel = channel\n self._channel.basic_qos(prefetch_count=\n self.DEFAULT_PREFETCH_COUNT)\n self.add_on_channel_close_callback()\n self.setup_queues_and_bindings()", "def open(self) -> None:", "def open(self) -> None:", "def open(self) -> None:", "def whenReadReady(self, channel, call):", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def onOpen(self):\n logger.log_msg(\"Discord connection established.\")\n self.factory.bot = self\n\n self.init_session(\"discord\", \"discord.gg\", self.factory.sessionhandler)\n self.uid = int(self.factory.uid)\n self.logged_in = True\n self.sessionhandler.connect(self)", "def is_open(self, channel=None):\n return self.get_state(channel)", "async def do_config():\n\n\n async def cfg_add():\n try:\n if config[message.server.id]:\n await bot.send_message(c, 'The channel is already configured.')\n return\n except:\n config[message.server.id] = {\n 'server_api_1': '',\n 'server_api_2': '',\n 'welcome_msg enabled': False,\n 'welcome_msg': '',\n }\n\n async def cfg_ip():\n ip = message.content.split()[2]\n config[message.server.id]['server_api_1'] = f'https://mcapi.us/server/status?ip={ip}'\n config[message.server.id]['server_api_2'] = f'https://eu.mc-api.net/v3/server/ping/{ip}'\n\n async def cfg_port():\n port = message.content.split()[2]\n try:\n config.get(message.server.id, 'server_api_1')\n config.get(message.server.id, 'server_api_2')\n except:\n await bot.send_message(c, 'IP not configured.')\n return\n config[message.server.id]['server_api_1'] += f'&port={port}'\n config[message.server.id]['server_api_2'] += f'%3A{port}'\n\n async def cfg_wmsg():\n msg = message.content.split('wmsg')[1].strip()\n config[message.server.id]['welcome_msg'] = msg\n\n subcommands = {\n 'add': cfg_add,\n 'ip': cfg_ip,\n 'port': cfg_port,\n 'wmsg': cfg_wmsg,\n }\n\n appinfo = await bot.application_info()\n\n if message.author.display_name != appinfo.owner.name and not message.author.server_permissions.manage_server:\n await bot.send_message(c, 'You don\\'t have the proper permissions to configure this bot.')\n return\n\n for i in subcommands:\n if 
message.content.split()[1] == i:\n await subcommands[i]()\n with open('dbot_config.ini', 'w') as f:\n config.write(f)\n await bot.send_message(c, 'Success.')", "def op(self,nick):\n self.logger.debug(\"giving ops to %s\" % nick)\n self.connection.mode(self.config[\"IRC/channel\"],\"+o \"+nick)", "def on_open(self, wsobj, message=None):\n if not message:\n wsobj.send(json.dumps({\"op\": \"C\"}))\n else:\n if message.get(\"op\") == \"C\" and message.get(\"sid\"):\n msg = {\"op\": \"S\", \"sid\": message[\"sid\"], \"tc\": self.channel}\n wsobj.send(json.dumps(msg))", "def open(self, transport_config, options, protocol_class=None):", "def _sendModeChange(self, msg, args=\"\", target=None):\n if target is None:\n target = \"#chan\"\n message = \":Wolf!~wolf@yok.utu.fi MODE {} {} {}\\r\\n\".format(target, msg, args)\n self.client.dataReceived(message)", "def test_channel_list1():\n reset_data()\n user1 = auth_register(\"123eff45\", \"xxx\", \"yyyy\", email=\"hi@gmail.com\")\n owner1 = auth_register(\"123eff45\", \"xxx\", \"yyyy\", email=\"hii@gmail.com\")\n channel1_1 = channels_create(owner1['token'], \"channel1\", True)['channel_id']\n channel_join(user1['token'], channel1_1)\n channel_list1 = channels_list(user1['token'])\n channels = [channel['channel_id'] for channel in channel_list1]\n assert channels == [channel1_1]\n print(\"=========pass test1 : only one channel in channel_list========\")", "def open( self ):\n pass", "def _connect(self):\n hostport = self.getHost()\n channelOpenData = forwarding.packOpen_direct_tcpip((self.host, self.port), (hostport.host, hostport.port))\n self.connector.connection.openChannel(self, channelOpenData)", "def Open(self):\n if self.writer is None:\n self.writer = self.IO.Open(self.channel_name, adios2.Mode.Write)", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False", "async def _cmdf_setchannel(self, substr, msg, privilege_level):\n ch_obj = None\n if len(substr) == 0:\n ch_obj = msg.channel\n else:\n ch_obj = self._client.search_for_channel(substr, enablenamesearch=True, serverrestriction=self._server)\n\n if ch_obj is None:\n buf = \"**Error:** Channel not found. 
No changes were made.\"\n else:\n self._ch_msg_channelid = ch_obj.id\n self._save_settings()\n buf = \"In-channel greeting messages will now be sent in \" + utils.ch_to_mention(ch_obj) + \".\"\n await self._client.send_msg(msg, buf)\n return", "def connection(self):\n pass", "def on_open_channel(new_channel):\n # assign new channel to the global channel variable\n global channel\n channel = new_channel\n\n # channel is assigned and declare a queue named scripbox.\n # queue Properties - durable is True so that the queue withstands rabbitmq reboot\n # Pass a callback on_queue_declared which fires when a queue declaration\n # is successful\n channel.queue_declare(queue='scripbox', durable=True,\n auto_delete=False, callback=on_queue_declared)", "def on_chat_open(self, request, trigger_context):\n raise NotImplementedError", "def test_channel_join_private_global():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"johnsmith@gmail.com\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"harrypotter@gmail.com\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", False)\n\n\n # Global DREAM owner attempt to join a private channel \n channel_join_v2(auth_token1, channel_id1[\"channel_id\"])\n\n # Check if the global owner successfully join private channel\n assert channels_list_v2(auth_token1) == {\n 'channels': [\n \t{\n \t\t'channel_id': 1, # channel id start at 1 or 0 is worth checking ? It's currently start at 1.\n \t\t'name': 'Chill Soc',\n \t}\n ],\n }", "def _channelList_changed(self):\n self.oscilloscope.visibleChannels = self.channelList", "def on_channel_ctrl_open(self, channel):\n # LOGGER.info('Channel opened')\n self._channel_ctrl = channel\n self._channel_ctrl.add_on_close_callback(self.on_channel_closed)\n self._channel_ctrl.basic_qos(prefetch_count=1)\n self.setup_exchange()", "def showChannels(self):\n print(\"Channels:\")\n for c in self.channels:\n if c.role != channel_pb2.Channel.Role.DISABLED:\n cStr = stripnl(MessageToJson(c.settings))\n print(\n f\" {channel_pb2.Channel.Role.Name(c.role)} psk={pskToString(c.settings.psk)} {cStr}\")\n publicURL = self.getURL(includeAll=False)\n adminURL = self.getURL(includeAll=True)\n print(f\"\\nPrimary channel URL: {publicURL}\")\n if adminURL != publicURL:\n print(f\"Complete URL (includes all channels): {adminURL}\")", "def hop_channel(self, channel):\n self.logger.info(\"Hopping to channel %s\", channel)\n os.system(f\"iwconfig {self.interface} channel {channel}\")", "def on_open_handler(self, instmt, ws):\n Logger.info(self.__class__.__name__, \"Instrument %s is subscribed in channel %s\" % \\\n (instmt.get_instmt_name(), instmt.get_exchange_name()))\n if not instmt.get_subscribed():\n Logger.info(self.__class__.__name__, 'order book string:{}'.format(self.api_socket.get_order_book_subscription_string(instmt)))\n Logger.info(self.__class__.__name__, 'trade string:{}'.format(self.api_socket.get_trades_subscription_string(instmt)))\n ws.send(self.api_socket.get_order_book_subscription_string(instmt))\n ws.send(self.api_socket.get_trades_subscription_string(instmt))\n instmt.set_subscribed(True)", "def __init__(self):\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\n self.channel = self.connection.channel()", "def connect(self):\n\t\tpass", "def 
on_open(self) -> None:\n\n channel = [{\"name\": \"level2\", \"product_ids\": list(self.products.keys())}]\n msg_subscribe = {\"type\": \"subscribe\", \"channels\": channel}\n\n subscribe_payload = json.dumps(msg_subscribe)\n self.ws.send(subscribe_payload)", "def connect(self):", "def connect(self):", "def connect_to_master():" ]
[ "0.6495685", "0.6411523", "0.6357903", "0.6315835", "0.6212122", "0.6187699", "0.6173837", "0.61588097", "0.61560345", "0.6084364", "0.60564893", "0.60467905", "0.60155", "0.6004395", "0.5985936", "0.59202945", "0.58966035", "0.58927876", "0.5869291", "0.5857404", "0.5834471", "0.58248323", "0.58112806", "0.5778409", "0.5771946", "0.57576776", "0.57515824", "0.57435477", "0.57195735", "0.56471777", "0.5635504", "0.5631845", "0.56275773", "0.56120646", "0.55928445", "0.55881774", "0.5570651", "0.5564368", "0.5551192", "0.5538337", "0.55299604", "0.5512658", "0.55121124", "0.5510943", "0.5505115", "0.5495511", "0.5494246", "0.5490683", "0.5487853", "0.54772675", "0.5471648", "0.54692596", "0.54553", "0.5447813", "0.5440522", "0.5440522", "0.54355043", "0.54337794", "0.54310423", "0.54231036", "0.5419321", "0.54141355", "0.54141355", "0.54141355", "0.5412136", "0.5393953", "0.5393953", "0.5393953", "0.5393953", "0.5393953", "0.5393953", "0.5393953", "0.53925925", "0.5384643", "0.5367416", "0.5361262", "0.5354255", "0.53541845", "0.5350656", "0.5345291", "0.5334366", "0.5332519", "0.53323525", "0.533062", "0.5326948", "0.5321294", "0.53128785", "0.53101987", "0.5305615", "0.5301789", "0.5297909", "0.52859527", "0.52820104", "0.52812064", "0.52775264", "0.5276303", "0.52754617", "0.52703696", "0.52703696", "0.5269711" ]
0.5856543
20
Dial a model defined in Swagger
def __init__(self, caller=None, dialstatus=None, dialstring=None, forward=None, forwarded=None, peer=None):  # noqa: E501  # noqa: E501
    self._caller = None
    self._dialstatus = None
    self._dialstring = None
    self._forward = None
    self._forwarded = None
    self._peer = None
    self.discriminator = None
    if caller is not None:
        self.caller = caller
    self.dialstatus = dialstatus
    if dialstring is not None:
        self.dialstring = dialstring
    if forward is not None:
        self.forward = forward
    if forwarded is not None:
        self.forwarded = forwarded
    self.peer = peer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)", "def openapi(self) -> api.OpenAPISpec:\n return self._get_model(model=api.OpenAPISpec)", "def swagger_definition(self, base_path=None, **kwargs):\n return Swagger(\n {\n \"info\": Info(\n {\n key: kwargs.get(key, self.DEFAULT_INFO.get(key))\n for key in Info.fields.keys()\n if key in kwargs or key in self.DEFAULT_INFO\n }\n ),\n \"paths\": self.paths,\n \"swagger\": \"2.0\",\n \"basePath\": base_path,\n }\n ).to_primitive()", "def GetModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def swagger():\n return jsonify(current_app.spec.to_dict())", "def model(self) -> Type[Model]:", "def detail(model_id: str = typer.Argument(..., help='Model ID')):\n with requests.get(f'{app_settings.api_v1_prefix}/model/{model_id}') as r:\n data = r.json()\n model_detailed_view(MLModel.parse_obj(data))", "def expose_models(app, HOST=\"localhost\", PORT=5000, API_PREFIX=\"/api\"):\n\n api = SAFRSAPI(app, host=HOST, port=PORT)\n api.expose_object(models.Category)\n api.expose_object(models.CustomerCustomerDemo)\n api.expose_object(models.OrderDetail)\n api.expose_object(models.Order)\n api.expose_object(models.Customer)\n api.expose_object(models.CustomerDemographic)\n api.expose_object(models.EmployeeAudit)\n api.expose_object(models.EmployeeTerritory)\n api.expose_object(models.Employee)\n api.expose_object(models.Product)\n api.expose_object(models.Region)\n api.expose_object(models.Shipper)\n api.expose_object(models.Supplier)\n api.expose_object(models.Territory)\n return api", "def __init__(self, client):\n self.client = client\n self.definitions = client.swagger_spec.definitions", "def create_model(self, ApiId: str, Name: str, Schema: str, ContentType: str = None, Description: str = None) -> Dict:\n pass", "def model(self) -> 'outputs.ModelDefinitionResponse':\n return pulumi.get(self, \"model\")", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def test_swagger(self):\n response = self.client.get(\"/api/v1/swagger\", query_string=dict(validate_schema=True))\n assert_that(response.status_code, is_(equal_to(200)))\n swagger = loads(response.get_data().decode(\"utf-8\"))\n # we have the swagger docs endpoint too, which is implemented as a query.\n # ignore it here for now.\n del swagger[\"paths\"][\"/swagger/docs\"]\n assert_that(swagger[\"paths\"], is_(equal_to({\n \"/foo/get\": {\n \"get\": {\n \"description\": \"My doc string\",\n \"tags\": [\"foo\"],\n \"responses\": {\n \"default\": {\n \"description\": \"An error occurred\", \"schema\": {\n \"$ref\": \"#/definitions/Error\",\n }\n },\n \"200\": {\n \"description\": \"My doc string\",\n \"schema\": {\n \"$ref\": \"#/definitions/QueryResult\",\n }\n }\n },\n \"parameters\": [\n {\n \"in\": \"header\",\n \"name\": \"X-Response-Skip-Null\",\n \"required\": False,\n \"type\": \"string\",\n \"description\": \"Remove fields with null values from the response.\"\n },\n {\n \"required\": False,\n \"type\": \"string\",\n \"name\": \"optional_value\",\n \"in\": \"query\",\n },\n {\n \"required\": True,\n \"type\": \"string\",\n \"name\": \"required_value\",\n \"in\": \"query\",\n },\n ],\n \"operationId\": \"query\",\n }\n }\n })))", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_produto': 'int',\n 'id_pessoa': 'int',\n 'id_parentesco': 'int',\n 'tipo_portador': 'str',\n 
'nome_impresso': 'str',\n 'id_tipo_cartao': 'int',\n 'flag_ativo': 'int',\n 'data_cadastro_portador': 'str',\n 'data_cancelamento_portador': 'str'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_produto': 'idProduto',\n 'id_pessoa': 'idPessoa',\n 'id_parentesco': 'idParentesco',\n 'tipo_portador': 'tipoPortador',\n 'nome_impresso': 'nomeImpresso',\n 'id_tipo_cartao': 'idTipoCartao',\n 'flag_ativo': 'flagAtivo',\n 'data_cadastro_portador': 'dataCadastroPortador',\n 'data_cancelamento_portador': 'dataCancelamentoPortador'\n }\n\n self._id_conta = None\n self._id_produto = None\n self._id_pessoa = None\n self._id_parentesco = None\n self._tipo_portador = None\n self._nome_impresso = None\n self._id_tipo_cartao = None\n self._flag_ativo = None\n self._data_cadastro_portador = None\n self._data_cancelamento_portador = None", "def api_documentation(api: str, summary: str, in_model: BaseModel,\n out_model: BaseModel, out_description: str) -> Callable:\n for model, name in ((in_model, 'Input'), (out_model, 'Output')):\n doc.Object(\n make_dataclass(\n f'Api{api[1:].title()}{name}',\n [(key, val.type_, val.type_)\n for key, val in model.__dict__['__fields__'].items()]))\n im_returns = doc.JsonBody({\n key: val.type_\n for key, val in in_model.__dict__['__fields__'].items()\n })\n\n om_returns = {\n key: val.type_\n for key, val in out_model.__dict__['__fields__'].items()\n }\n\n def decorator(func):\n @doc.summary(summary)\n @doc.response(412,\n 'Error: Precondition Failed',\n description='The passed request-parameters are invalid')\n @doc.response(500,\n 'Error: Server-Error occured',\n description='An internal error occured')\n @doc.consumes(im_returns,\n content_type='application/json',\n location='body')\n @doc.produces(om_returns,\n content_type='application/json',\n description=out_description)\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n return await func(request=request, *args, **kwargs)\n\n return function_wrapper\n\n return decorator", "def model(self) -> str:\n ...", "def describe_model(ModelName=None):\n pass", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'version': 'str',\n 'tagline': 'str',\n 'keywords': 'str',\n 'short_description': 'str',\n 'usage_information': 'str',\n 'long_description': 'str',\n 'license_model_description': 'str',\n 'system_requirements': 'str',\n 'time_released': 'datetime',\n 'release_notes': 'str',\n 'categories': 'list[str]',\n 'publisher': 'Publisher',\n 'languages': 'list[Item]',\n 'screenshots': 'list[Screenshot]',\n 'videos': 'list[NamedLink]',\n 'support_contacts': 'list[SupportContact]',\n 'support_links': 'list[NamedLink]',\n 'documentation_links': 'list[DocumentationLink]',\n 'icon': 'UploadData',\n 'banner': 'UploadData',\n 'regions': 'list[Region]',\n 'package_type': 'str',\n 'default_package_version': 'str',\n 'links': 'list[Link]',\n 'is_featured': 'bool'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'version': 'version',\n 'tagline': 'tagline',\n 'keywords': 'keywords',\n 'short_description': 'shortDescription',\n 'usage_information': 'usageInformation',\n 'long_description': 'longDescription',\n 'license_model_description': 'licenseModelDescription',\n 'system_requirements': 'systemRequirements',\n 'time_released': 'timeReleased',\n 'release_notes': 'releaseNotes',\n 'categories': 'categories',\n 'publisher': 'publisher',\n 'languages': 'languages',\n 'screenshots': 'screenshots',\n 'videos': 'videos',\n 'support_contacts': 
'supportContacts',\n 'support_links': 'supportLinks',\n 'documentation_links': 'documentationLinks',\n 'icon': 'icon',\n 'banner': 'banner',\n 'regions': 'regions',\n 'package_type': 'packageType',\n 'default_package_version': 'defaultPackageVersion',\n 'links': 'links',\n 'is_featured': 'isFeatured'\n }\n\n self._id = None\n self._name = None\n self._version = None\n self._tagline = None\n self._keywords = None\n self._short_description = None\n self._usage_information = None\n self._long_description = None\n self._license_model_description = None\n self._system_requirements = None\n self._time_released = None\n self._release_notes = None\n self._categories = None\n self._publisher = None\n self._languages = None\n self._screenshots = None\n self._videos = None\n self._support_contacts = None\n self._support_links = None\n self._documentation_links = None\n self._icon = None\n self._banner = None\n self._regions = None\n self._package_type = None\n self._default_package_version = None\n self._links = None\n self._is_featured = None", "def test_openapi_schema(app, client):\n response = client.get(\"/swagger/\")\n assert response.status_code == 200\n assert len(json.loads(response.data)[\"paths\"]) > 0", "def ApiFromDiscoveryDoc(self, path):\n\n f = open(os.path.join(os.path.dirname(__file__), 'testdata', path))\n discovery_doc = simplejson.loads(f.read())\n f.close()\n return Api(discovery_doc)", "def __init__(self):\n self.swagger_types = {\n 'owner_id': 'str',\n 'created_at': 'datetime',\n 'identifier': 'str',\n 'identifier_type': 'str',\n 'default_language': 'str',\n 'optional_identifier': 'str',\n 'id': 'str',\n 'v': 'float',\n 'id': 'str',\n 'case_records': 'list[str]'\n }\n\n self.attribute_map = {\n 'owner_id': '_ownerId',\n 'created_at': '_createdAt',\n 'identifier': 'identifier',\n 'identifier_type': 'identifierType',\n 'default_language': 'defaultLanguage',\n 'optional_identifier': 'optionalIdentifier',\n 'id': '_id',\n 'v': '__v',\n 'case_records': 'caseRecords'\n }\n\n self._owner_id = None\n self._created_at = None\n self._identifier = None\n self._identifier_type = None\n self._default_language = None\n self._optional_identifier = None\n self._id = None\n self._v = None\n self._id = None\n self._case_records = None", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'date_created': 'datetime',\n 'date_modified': 'datetime',\n 'version': 'int',\n 'division': 'DomainEntityRef',\n 'campaign_status': 'str',\n 'callable_time_set': 'DomainEntityRef',\n 'contact_list': 'DomainEntityRef',\n 'dnc_lists': 'list[DomainEntityRef]',\n 'always_running': 'bool',\n 'contact_sorts': 'list[ContactSort]',\n 'messages_per_minute': 'int',\n 'errors': 'list[RestErrorDetail]',\n 'sms_config': 'SmsConfig',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'date_created': 'dateCreated',\n 'date_modified': 'dateModified',\n 'version': 'version',\n 'division': 'division',\n 'campaign_status': 'campaignStatus',\n 'callable_time_set': 'callableTimeSet',\n 'contact_list': 'contactList',\n 'dnc_lists': 'dncLists',\n 'always_running': 'alwaysRunning',\n 'contact_sorts': 'contactSorts',\n 'messages_per_minute': 'messagesPerMinute',\n 'errors': 'errors',\n 'sms_config': 'smsConfig',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._date_created = None\n self._date_modified = None\n self._version = None\n self._division = None\n self._campaign_status = None\n self._callable_time_set = None\n self._contact_list = None\n 
self._dnc_lists = None\n self._always_running = None\n self._contact_sorts = None\n self._messages_per_minute = None\n self._errors = None\n self._sms_config = None\n self._self_uri = None", "def get_swagger_view(title=None, url=None, patterns=None, urlconf=None):\n class SwaggerSchemaView(APIView):\n _ignore_model_permissions = True\n exclude_from_schema = True\n permission_classes = [AllowAny]\n renderer_classes = [\n CoreJSONRenderer,\n renderers.OpenAPIRenderer,\n renderers.SwaggerUIRenderer\n ]\n\n def get(self, request):\n generator = CustomSchemaGenerator(\n title=title,\n url=url,\n patterns=patterns,\n urlconf=urlconf\n )\n schema = generator.get_schema(request=request, public=True)\n\n if not schema:\n raise exceptions.ValidationError(\n 'The schema generator did not return a schema Document'\n )\n\n return Response(schema)\n\n return SwaggerSchemaView.as_view()", "def get_swagger_view(title=None, url=None, patterns=None, urlconf=None):\n class SwaggerSchemaView(APIView):\n _ignore_model_permissions = True\n exclude_from_schema = True\n permission_classes = [AllowAny]\n renderer_classes = [\n CoreJSONRenderer,\n renderers.OpenAPIRenderer,\n renderers.SwaggerUIRenderer\n ]\n\n def get(self, request):\n generator = SchemaGenerator(\n title=title,\n url=url,\n patterns=patterns,\n urlconf=urlconf\n )\n schema = generator.get_schema(request=request)\n\n if not schema:\n raise exceptions.ValidationError(\n 'The schema generator did not return a schema Document'\n )\n\n return Response(schema)\n\n return SwaggerSchemaView.as_view()", "def resolve(self, spec: \"ModelSpec\"):", "def test_object_model_with_ref_props(self):\n from petstore_api.model import object_model_with_ref_props\n endpoint = self.api.object_model_with_ref_props\n assert endpoint.openapi_types['body'] == (object_model_with_ref_props.ObjectModelWithRefProps,)\n assert endpoint.settings['response_type'] == (object_model_with_ref_props.ObjectModelWithRefProps,)", "def __init__(self): # noqa: E501\n self.openapi_types = {\n }\n\n self.attribute_map = {\n }", "def get_model(self):\n return Doc()", "def P_GetModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'uses_git': 'bool',\n 'git_remote_url': 'str',\n 'git_username': 'str',\n 'git_password': 'str',\n 'git_username_user_attribute': 'str',\n 'git_password_user_attribute': 'str',\n 'git_service_name': 'str',\n 'deploy_secret': 'str',\n 'unset_deploy_secret': 'bool',\n 'pull_request_mode': 'str',\n 'validation_required': 'bool',\n 'allow_warnings': 'bool',\n 'is_example': 'bool',\n 'can': 'dict(str, bool)'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'uses_git': 'uses_git',\n 'git_remote_url': 'git_remote_url',\n 'git_username': 'git_username',\n 'git_password': 'git_password',\n 'git_username_user_attribute': 'git_username_user_attribute',\n 'git_password_user_attribute': 'git_password_user_attribute',\n 'git_service_name': 'git_service_name',\n 'deploy_secret': 'deploy_secret',\n 'unset_deploy_secret': 'unset_deploy_secret',\n 'pull_request_mode': 'pull_request_mode',\n 'validation_required': 'validation_required',\n 'allow_warnings': 'allow_warnings',\n 'is_example': 'is_example',\n 'can': 'can'\n }\n\n self._id = None\n self._name = None\n self._uses_git = None\n self._git_remote_url = None\n self._git_username = None\n self._git_password 
= None\n self._git_username_user_attribute = None\n self._git_password_user_attribute = None\n self._git_service_name = None\n self._deploy_secret = None\n self._unset_deploy_secret = None\n self._pull_request_mode = None\n self._validation_required = None\n self._allow_warnings = None\n self._is_example = None\n self._can = None", "def __init__(self, model: object):\n self.model = model", "def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n\n if not __name__ == cls.__module__:\n # e.g.: cls.__module__ = mpcontribs.api.projects.views\n views_path = cls.__module__.split(\".\")\n doc_path = \".\".join(views_path[:-1] + [\"document\"])\n cls.tags = [views_path[-2]]\n doc_filepath = doc_path.replace(\".\", os.sep) + \".py\"\n if os.path.exists(doc_filepath):\n cls.doc_name = cls.tags[0].capitalize()\n Model = getattr(import_module(doc_path), cls.doc_name)\n cls.schema_name = cls.doc_name + \"Schema\"\n cls.Schema = type(\n cls.schema_name,\n (ModelSchema, object),\n {\n \"Meta\": type(\n \"Meta\",\n (object,),\n dict(model=Model, ordered=True, model_build_obj=False),\n )\n },\n )\n cls.definitions = {cls.schema_name: schema2jsonschema(cls.Schema)}\n cls.resource.schema = cls.Schema\n\n # write flask-mongorest swagger specs\n for method in cls.methods:\n spec = get_specs(cls, method, cls.tags[0])\n if spec:\n dir_path = os.path.join(DOC_DIR, cls.tags[0])\n file_path = os.path.join(dir_path, method.__name__ + \".yml\")\n if not os.path.exists(file_path):\n os.makedirs(dir_path, exist_ok=True)\n\n if is_gunicorn:\n with open(file_path, \"w\") as f:\n yaml.dump(spec, f)\n logger.debug(\n f\"{cls.tags[0]}.{method.__name__} written to {file_path}\"\n )", "def GetModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def GetModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def api(self) -> str:", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_pessoa': 'int',\n 'id_cartao': 'int',\n 'id_bandeira': 'int',\n 'id_tipo_cartao': 'int',\n 'numero_cartao': 'str',\n 'nome_plastico': 'str',\n 'cvv2': 'str',\n 'data_geracao': 'str',\n 'data_validade': 'str',\n 'cpf': 'str',\n 'tipo_portador': 'str',\n 'trilha1': 'str',\n 'trilha2': 'str',\n 'trilha_cvv1': 'str',\n 'trilha_cvv2': 'str',\n 'flag_virtual': 'int',\n 'nome_bandeira': 'str',\n 'flag_titular': 'int',\n 'sequencial_cartao': 'int',\n 'id_status': 'int',\n 'descricao_status_cartao': 'str',\n 'data_status': 'str',\n 'id_estagio': 'int',\n 'descricao_estagio': 'str',\n 'data_estagio': 'str',\n 'numero_bin': 'str',\n 'id_produto': 'int',\n 'descricao_produto': 'str',\n 'id_status_conta': 'int',\n 'descricao_status_conta': 'int',\n 'data_embossing': 'str',\n 'codigo_desbloqueio': 'str',\n 'nome_pessoa': 'str',\n 'tipo_pessoa': 'str',\n 'data_nascimento': 'str',\n 'id_endereco': 'int',\n 'id_tipo_endereco': 'int',\n 'descricao_tipo_endereco': 'str',\n 'cep': 'str',\n 'logradouro': 'str',\n 'numero_endereco': 'str',\n 'complemento_endereco': 'str',\n 'bairro': 'str',\n 'cidade': 'str',\n 'uf': 'str',\n 'pais': 'str',\n 'senha_criptografada': 'str',\n 'icvv': 'str',\n 'id_status_impressao': 'int'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_pessoa': 'idPessoa',\n 'id_cartao': 'idCartao',\n 'id_bandeira': 'idBandeira',\n 
'id_tipo_cartao': 'idTipoCartao',\n 'numero_cartao': 'numeroCartao',\n 'nome_plastico': 'nomePlastico',\n 'cvv2': 'cvv2',\n 'data_geracao': 'dataGeracao',\n 'data_validade': 'dataValidade',\n 'cpf': 'cpf',\n 'tipo_portador': 'tipoPortador',\n 'trilha1': 'trilha1',\n 'trilha2': 'trilha2',\n 'trilha_cvv1': 'trilhaCVV1',\n 'trilha_cvv2': 'trilhaCVV2',\n 'flag_virtual': 'flagVirtual',\n 'nome_bandeira': 'nomeBandeira',\n 'flag_titular': 'flagTitular',\n 'sequencial_cartao': 'sequencialCartao',\n 'id_status': 'idStatus',\n 'descricao_status_cartao': 'descricaoStatusCartao',\n 'data_status': 'dataStatus',\n 'id_estagio': 'idEstagio',\n 'descricao_estagio': 'descricaoEstagio',\n 'data_estagio': 'dataEstagio',\n 'numero_bin': 'numeroBin',\n 'id_produto': 'idProduto',\n 'descricao_produto': 'descricaoProduto',\n 'id_status_conta': 'idStatusConta',\n 'descricao_status_conta': 'descricaoStatusConta',\n 'data_embossing': 'dataEmbossing',\n 'codigo_desbloqueio': 'codigoDesbloqueio',\n 'nome_pessoa': 'nomePessoa',\n 'tipo_pessoa': 'tipoPessoa',\n 'data_nascimento': 'dataNascimento',\n 'id_endereco': 'idEndereco',\n 'id_tipo_endereco': 'idTipoEndereco',\n 'descricao_tipo_endereco': 'descricaoTipoEndereco',\n 'cep': 'cep',\n 'logradouro': 'logradouro',\n 'numero_endereco': 'numeroEndereco',\n 'complemento_endereco': 'complementoEndereco',\n 'bairro': 'bairro',\n 'cidade': 'cidade',\n 'uf': 'uf',\n 'pais': 'pais',\n 'senha_criptografada': 'senhaCriptografada',\n 'icvv': 'icvv',\n 'id_status_impressao': 'idStatusImpressao'\n }\n\n self._id_conta = None\n self._id_pessoa = None\n self._id_cartao = None\n self._id_bandeira = None\n self._id_tipo_cartao = None\n self._numero_cartao = None\n self._nome_plastico = None\n self._cvv2 = None\n self._data_geracao = None\n self._data_validade = None\n self._cpf = None\n self._tipo_portador = None\n self._trilha1 = None\n self._trilha2 = None\n self._trilha_cvv1 = None\n self._trilha_cvv2 = None\n self._flag_virtual = None\n self._nome_bandeira = None\n self._flag_titular = None\n self._sequencial_cartao = None\n self._id_status = None\n self._descricao_status_cartao = None\n self._data_status = None\n self._id_estagio = None\n self._descricao_estagio = None\n self._data_estagio = None\n self._numero_bin = None\n self._id_produto = None\n self._descricao_produto = None\n self._id_status_conta = None\n self._descricao_status_conta = None\n self._data_embossing = None\n self._codigo_desbloqueio = None\n self._nome_pessoa = None\n self._tipo_pessoa = None\n self._data_nascimento = None\n self._id_endereco = None\n self._id_tipo_endereco = None\n self._descricao_tipo_endereco = None\n self._cep = None\n self._logradouro = None\n self._numero_endereco = None\n self._complemento_endereco = None\n self._bairro = None\n self._cidade = None\n self._uf = None\n self._pais = None\n self._senha_criptografada = None\n self._icvv = None\n self._id_status_impressao = None", "def model_definition(self):\n pass", "def model() -> Model:\n return Model()", "def test_add_model_get_docs(input_model):\n with patch_registry(), patch_parse_doc() as mock_parse_doc:\n swagger.add_model(input_model)\n mock_parse_doc.assert_called_once_with(input_model)", "def model_info():\n pass", "def get_openapi_spec(self):\n\n spec = {\"operationId\": snake_to_camel(self._wrapped_function.__name__), \"responses\": {}}\n\n if self._doc.short_description:\n spec[\"summary\"] = self._doc.short_description\n\n if self._doc.long_description:\n spec[\"description\"] = self._doc.long_description\n\n if 
self._tags:\n spec[\"tags\"] = self._tags\n\n if self._path_parameters or self._query_parameters:\n spec[\"parameters\"] = []\n\n for name, param_type in self._path_parameters.items():\n if self._is_param_ignored(name):\n continue\n\n param_spec = {\n \"name\": name,\n \"in\": \"path\",\n \"required\": True,\n \"schema\": {\"type\": self._extension.PARAMETER_TYPE_MAP.get(param_type, \"string\")},\n }\n\n param_doc = self._get_param_doc(name)\n if param_doc is not None:\n param_spec[\"description\"] = param_doc.description\n\n spec[\"parameters\"].append(param_spec)\n\n for name, param_type in self._query_parameters.items():\n param_refl: inspect.Parameter = self._signature.parameters[name]\n param_spec = {\n \"name\": name,\n \"in\": \"query\",\n \"required\": param_refl.default == inspect.Parameter.empty,\n \"schema\": {\"type\": self._extension.PARAMETER_TYPE_MAP.get(param_type, \"string\")},\n }\n\n param_doc = self._get_param_doc(name)\n if param_doc is not None:\n param_spec[\"description\"] = param_doc.description\n\n spec[\"parameters\"].append(param_spec)\n\n if self._request_body_parameter:\n mimetypes = self._request_body_content_types\n\n spec[\"requestBody\"] = {\n \"content\": {\n mimetype: {\"schema\": self._process_model_schema(self._request_body_class)} for mimetype in mimetypes\n },\n \"required\": True,\n }\n\n if issubclass(self._request_body_class, ExamplesMixin):\n for mimetype in mimetypes:\n spec[\"requestBody\"][\"content\"][mimetype][\"examples\"] = model_examples_to_openapi_dict(\n self._request_body_class\n )\n\n param_doc = self._get_param_doc(self._request_body_parameter)\n if param_doc is not None and param_doc.description:\n spec[\"requestBody\"][\"description\"] = param_doc.description\n\n spec[\"x-codegen-request-body-name\"] = \"body\"\n elif self._request_body_file_type:\n spec[\"requestBody\"] = {\n \"content\": {self._request_body_file_type: {\"schema\": {\"type\": \"string\", \"format\": \"binary\"}}}\n }\n\n if self._security:\n spec[\"security\"] = self._security\n\n for response_class, codes in self._responses.items():\n for code, response_data in codes.items():\n if issubclass(response_class, FileResponse):\n mime = response_data.mimetype or \"application/octet-stream\"\n spec[\"responses\"][str(code)] = {\n \"description\": response_data.description or response_class.__name__,\n \"content\": {mime: {\"schema\": {\"type\": \"string\", \"format\": \"binary\"}}},\n }\n else:\n spec[\"responses\"][str(code)] = {\n \"description\": response_data.description or response_class.__name__,\n \"content\": {\"application/json\": {\"schema\": self._process_model_schema(response_class)}},\n }\n\n if issubclass(response_class, ExamplesMixin):\n # fmt: off\n spec[\"responses\"][str(code)][\"content\"][\"application/json\"][\"examples\"] = \\\n model_examples_to_openapi_dict(response_class)\n # fmt: on\n\n return spec", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'ticket_id': 'str',\n 'type': 'str',\n 'from_number': 'str',\n 'from_name': 'str',\n 'to_number': 'str',\n 'to_name': 'str',\n 'via_number': 'str',\n 'date_created': 'datetime',\n 'date_answered': 'datetime',\n 'date_finished': 'datetime'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'ticket_id': 'ticketId',\n 'type': 'type',\n 'from_number': 'fromNumber',\n 'from_name': 'fromName',\n 'to_number': 'toNumber',\n 'to_name': 'toName',\n 'via_number': 'viaNumber',\n 'date_created': 'dateCreated',\n 'date_answered': 'dateAnswered',\n 'date_finished': 'dateFinished'\n }\n\n self._id = None\n 
self._ticket_id = None\n self._type = None\n self._from_number = None\n self._from_name = None\n self._to_number = None\n self._to_name = None\n self._via_number = None\n self._date_created = None\n self._date_answered = None\n self._date_finished = None", "def expose(self, model, route='/api', access_control=None, resource_class=Resource, **kwargs):\n endpoint_path = route + '/' + inflection.pluralize(inflection.underscore(model.__name__))\n endpoint = endpoint_path\n resource = Resource(model=model, access_control=access_control)\n self._add_api_method(endpoint_path, resource.list_,\n methods=['GET'], endpoint=endpoint + '/list')\n self._add_api_method('%s/<id>' % endpoint_path, resource.get_,\n methods=['GET'], endpoint=endpoint + '/get')\n\n self._add_api_method(endpoint_path, resource.put_,\n methods=['PUT'], endpoint=endpoint + '/put')\n\n self._add_api_method('%s/<id>' % endpoint_path, resource.delete_,\n methods=['DELETE'], endpoint=endpoint + '/delete')\n\n self._add_api_method(endpoint_path, resource.post_,\n methods=['POST'], endpoint=endpoint + 'post')\n\n self._add_api_method('%s/<id>' % endpoint_path, resource.patch_,\n methods=['PATCH'], endpoint=endpoint + 'patch')", "def custom_openapi() -> Dict:\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"The GenomicMedLab Cool Seq Tool\",\n version=__version__,\n description=\"Common Operations On Lots-of Sequences Tool.\",\n routes=app.routes\n )\n\n openapi_schema[\"info\"][\"contact\"] = {\n \"name\": \"Alex H. Wagner\",\n \"email\": \"Alex.Wagner@nationwidechildrens.org\",\n \"url\": \"https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab\" # noqa: E501\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model)", "def _request_model(self, instance, success, get_embedded=True):\n coll = self.get_collection('_model')\n if get_embedded:\n callback = partial(self._get_embedded_model_names,\n instance=instance,\n success=success)\n else:\n callback = success\n\n try:\n instance['_model']\n except KeyError:\n raise tornado.web.HTTPError(400, 'Missing model key')\n coll.find_one({'_id': instance['_model']},\n callback=callback)", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def get_model(model=gin.REQUIRED):\n return model", "def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'name': 'str',\n 'store_data': 'object',\n 'discovered': 'datetime',\n 'extraction_failure': 'bool',\n 'in_trash': 'bool',\n 'is_extracted': 'bool',\n 'meta_available': 'bool',\n 'size': 'int',\n 'start_time': 'datetime',\n 'end_time': 'datetime',\n 'duration': 'float',\n 'messages': 'int',\n 'tags': 'list[Tag]'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 'name': 'name',\n 'store_data': 'store_data',\n 'discovered': 'discovered',\n 'extraction_failure': 'extraction_failure',\n 'in_trash': 'in_trash',\n 'is_extracted': 'is_extracted',\n 'meta_available': 'meta_available',\n 'size': 'size',\n 'start_time': 'start_time',\n 'end_time': 'end_time',\n 'duration': 'duration',\n 'messages': 'messages',\n 'tags': 'tags'\n }\n\n self._detail_type = None\n self._name = None\n self._store_data = None\n self._discovered = None\n self._extraction_failure = None\n self._in_trash = None\n 
self._is_extracted = None\n self._meta_available = None\n self._size = None\n self._start_time = None\n self._end_time = None\n self._duration = None\n self._messages = None\n self._tags = None", "def real_model(request):\n return request.config.option.real_model", "def api():\n from gluon.contrib.hypermedia import Collection\n rules = {\n '<tablename>': {'GET':{},'POST':{},'PUT':{},'DELETE':{}},\n }\n return Collection(db).process(request,response,rules)", "def api():\n from gluon.contrib.hypermedia import Collection\n rules = {\n '<tablename>': {'GET':{},'POST':{},'PUT':{},'DELETE':{}},\n }\n return Collection(db).process(request,response,rules)", "def api():\n from gluon.contrib.hypermedia import Collection\n rules = {\n '<tablename>': {'GET':{},'POST':{},'PUT':{},'DELETE':{}},\n }\n return Collection(db).process(request,response,rules)", "def __getattr__(self, attr):\n actual_resource = getattr(self.swagger_client, attr)\n if attr in [\"Authorization\", \"Effects\", \"Identify\", \"Info\",\n \"PanelLayout\", \"State\"]:\n return WrappedResource(actual_resource, attr)\n else:\n return actual_resource", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'domain': 'str',\n 'custom_domain': 'str',\n 'customer_email': 'str',\n 'customer_name': 'str',\n 'company': 'str',\n 'date_created': 'datetime',\n 'date_validity': 'datetime',\n 'status': 'str',\n 'account_id': 'str',\n 'cluster_id': 'str',\n 'task_id': 'str',\n 'version': 'str',\n 'is_latest': 'bool',\n 'product_id': 'str',\n 'variation_id': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'domain': 'domain',\n 'custom_domain': 'custom_domain',\n 'customer_email': 'customer_email',\n 'customer_name': 'customer_name',\n 'company': 'company',\n 'date_created': 'date_created',\n 'date_validity': 'date_validity',\n 'status': 'status',\n 'account_id': 'account_id',\n 'cluster_id': 'cluster_id',\n 'task_id': 'task_id',\n 'version': 'version',\n 'is_latest': 'is_latest',\n 'product_id': 'product_id',\n 'variation_id': 'variation_id'\n }\n\n self._id = None\n self._domain = None\n self._custom_domain = None\n self._customer_email = None\n self._customer_name = None\n self._company = None\n self._date_created = None\n self._date_validity = None\n self._status = None\n self._account_id = None\n self._cluster_id = None\n self._task_id = None\n self._version = None\n self._is_latest = None\n self._product_id = None\n self._variation_id = None", "def __init__(self):\n self.swagger_types = {\n 'ids': 'list[str]',\n 'consumer': 'str',\n 'entity_type': 'str',\n 'start_date': 'datetime',\n 'end_date': 'datetime',\n 'created_date': 'datetime',\n 'updated_date': 'datetime',\n 'scope': 'str',\n 'disabled': 'bool',\n 'id': 'str'\n }\n\n self.attribute_map = {\n 'ids': 'ids',\n 'consumer': 'consumer',\n 'entity_type': 'entityType',\n 'start_date': 'startDate',\n 'end_date': 'endDate',\n 'created_date': 'createdDate',\n 'updated_date': 'updatedDate',\n 'scope': 'scope',\n 'disabled': 'disabled',\n 'id': 'id'\n }\n\n self._ids = None\n self._consumer = None\n self._entity_type = None\n self._start_date = None\n self._end_date = None\n self._created_date = None\n self._updated_date = None\n self._scope = None\n self._disabled = None\n self._id = None", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def schema(self):\n # NOTE This is exactly the same as the other thing.\n return {\n \"$id\": f\"{self.request.resource_url(self)}#schema\",\n \"type\": 
\"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n # generated fields shouldn't be submitted or in forms\n \"url\": {\"type\": \"string\", \"generated\": True},\n }\n }", "def __init__(self):\n self.swagger_types = {\n 'discovery': 'Discovery',\n 'groups': 'list[str]',\n 'labels': 'object'\n }\n\n self.attribute_map = {\n 'discovery': 'discovery',\n 'groups': 'groups',\n 'labels': 'labels'\n }\n\n self._discovery = None\n self._groups = None\n self._labels = None", "def model(name):\n model = Model.query.filter_by(name=name).first_or_404()\n\n if request.method == 'POST':\n # update model (publish a new version)\n validate_owner(model, request)\n\n # TODO validate the data\n # TODO should the model be sent as a separate file, w/ a checksum?\n # TODO client should first validate version stuff before submitting the full model\n data = request.get_json()\n\n try:\n version = data['meta']['version']\n model.publish(data['meta'], data['model'], version)\n model.make_archive(version)\n db.session.add(model)\n db.session.commit()\n return jsonify(status='success')\n except ModelConflictException as e:\n return jsonify(status='failure', reason=str(e)), 409\n\n elif request.method == 'DELETE':\n # deletes the entire model package\n validate_owner(model, request)\n model.destroy()\n return jsonify(status='success')\n\n elif request.method == 'PUT':\n # this is just for changing ownership atm\n validate_owner(model, request)\n data = request.get_json()\n\n user = User.query.filter_by(name=data['user']).first_or_404()\n model.owner = user\n db.session.add(model)\n db.session.commit()\n return jsonify(status='success')\n\n else:\n # download archive\n try:\n return send_from_directory(*os.path.split(model.archive()))\n except ModelNotFoundException:\n abort(404)", "def test_api_schema(self):\n response = self.client.get(\"/api/schema/\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.get(\"Content-Type\"), \"application/vnd.oai.openapi; charset=utf-8\"\n )\n self.assertEqual(\n response.get(\"Content-Disposition\"), 'inline; filename=\"Marsha API.yaml\"'\n )", "def __init__(self, model):\n self._model = model", "def index():\n definition = {\n \"swagger\": \"2.0\",\n \"info\": {\n \"title\": flask.current_app.config.get(\"APPNAME\", \"Not specified\"),\n \"version\": flask.current_app.config.get(\"VERSION\", \"Not specified\"),\n },\n \"host\": request.host,\n \"schemes\": [\"http\"],\n \"consumes\": [\"application/json\"],\n \"produces\": [\"application/json\"],\n \"definitions\": registry._definitions,\n \"paths\": {}\n }\n\n rules = list(flask.current_app.url_map.iter_rules())\n for r in sorted(rules, key=operator.attrgetter('rule')):\n if r.rule.startswith('/static'):\n continue\n if r.endpoint in registry._skipped:\n continue\n\n rule = re.sub(r\"<(?:[_a-zA-Z0-9\\(\\)]+:)?([a-zA-Z0-9_]+)>\", r\"{\\1}\", r.rule)\n if rule not in definition['paths']:\n definition['paths'][rule] = {}\n\n methods_handled = r.methods & REST_METHODS\n handler = flask.current_app.view_functions.get(r.endpoint)\n doc = handler.func_doc\n\n if len(methods_handled) == 1:\n method = methods_handled.pop().lower()\n try:\n validated = yaml.safe_load(doc)\n if not isinstance(validated, dict):\n raise Exception(\"Not a descriptor\")\n definition['paths'][rule][method] = validated\n except Exception:\n pass\n\n else:\n # We need to handle multi-method docstrings differently\n # because the documentation needs to define both, and\n # it's a higher level of the swagger hierarchy\n 
try:\n validated = yaml.safe_load(doc)\n if not isinstance(validated, dict):\n raise Exception(\"Not a descriptor\")\n definition['paths'][rule].update(validated)\n except Exception:\n definition['paths'][rule] = {}\n\n resp = flask.make_response(\n json.dumps(definition, for_json=True))\n resp.headers.set(\"Content-type\", 'application/json')\n resp.headers.set(\"Access-Control-Allow-Origin\", \"*\")\n return resp", "def opt_get_all_models_rest_api():\n return retrieve_all_models()", "def modelClass(self):\n raise NotImplementedError", "def CreateModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def model(self, key, model_type:T, default=undefined, description=None, **kwargs) -> T:\n return self._process(key, description=description, default=default, cast=cast_pydantic(model_type),type=model_type, **kwargs)", "def get_response_model_ctor(self):\n return self._response_model_ctor", "def load_model(self) -> Any:", "def __init__(self):\n self.swagger_types = {\n 'detail_type': 'str',\n 'identifier': 'int',\n 'success': 'bool',\n 'description': 'str',\n 'duration': 'float',\n 'bag_name': 'str',\n 'bag_store_name': 'str',\n 'results': 'object',\n 'bag': 'BagSummary'\n }\n\n self.attribute_map = {\n 'detail_type': 'detail_type',\n 'identifier': 'identifier',\n 'success': 'success',\n 'description': 'description',\n 'duration': 'duration',\n 'bag_name': 'bag_name',\n 'bag_store_name': 'bag_store_name',\n 'results': 'results',\n 'bag': 'bag'\n }\n\n self._detail_type = None\n self._identifier = None\n self._success = None\n self._description = None\n self._duration = None\n self._bag_name = None\n self._bag_store_name = None\n self._results = None\n self._bag = None", "def MakeModel(self):\n pass", "def __init__(self, name):\n super(SchemaStub, self).__init__()\n self.model = SchemaStub._ModelStub()\n self.name = name", "def test_model_endpoint(request, client, fixture_name, detail_attr, comparison_attr):\n instance = request.getfixturevalue(fixture_name)\n model = instance.__class__\n resource_name = model.__name__.lower()\n\n # test list endpoint\n response = client.get(api_reverse(\"%s-list\" % resource_name))\n check_response(response)\n results = response.json()['results']\n assert results\n assert len(results) == model.objects.count()\n\n # test detail endpoint\n response = client.get(api_reverse(\"%s-detail\" % resource_name, args=[getattr(instance, detail_attr)]))\n check_response(response)\n results = response.json()\n assert results[comparison_attr] == getattr(instance, comparison_attr)", "def create_model_endpoint(\n cls,\n db_session: sqlalchemy.orm.Session,\n model_endpoint: mlrun.common.schemas.ModelEndpoint,\n ) -> mlrun.common.schemas.ModelEndpoint:\n\n if model_endpoint.spec.model_uri or model_endpoint.status.feature_stats:\n logger.info(\n \"Getting feature metadata\",\n project=model_endpoint.metadata.project,\n model=model_endpoint.spec.model,\n function=model_endpoint.spec.function_uri,\n model_uri=model_endpoint.spec.model_uri,\n )\n\n # If model artifact was supplied, grab model metadata from artifact\n if model_endpoint.spec.model_uri:\n logger.info(\n \"Getting model object, inferring column names and collecting feature stats\"\n )\n run_db = mlrun.api.api.utils.get_run_db_instance(db_session)\n model_obj: mlrun.artifacts.ModelArtifact = (\n mlrun.datastore.store_resources.get_store_resource(\n model_endpoint.spec.model_uri, db=run_db\n )\n )\n\n # Get stats from model object if not found in model endpoint object\n if not 
model_endpoint.status.feature_stats and hasattr(\n model_obj, \"feature_stats\"\n ):\n if model_obj.spec.feature_stats:\n mlrun.common.model_monitoring.helpers.pad_features_hist(\n mlrun.common.model_monitoring.helpers.FeatureStats(\n model_obj.spec.feature_stats\n )\n )\n model_endpoint.status.feature_stats = model_obj.spec.feature_stats\n # Get labels from model object if not found in model endpoint object\n if not model_endpoint.spec.label_names and model_obj.spec.outputs:\n model_label_names = [\n mlrun.api.crud.model_monitoring.helpers.clean_feature_name(f.name)\n for f in model_obj.spec.outputs\n ]\n model_endpoint.spec.label_names = model_label_names\n\n # Get algorithm from model object if not found in model endpoint object\n if not model_endpoint.spec.algorithm and model_obj.spec.algorithm:\n model_endpoint.spec.algorithm = model_obj.spec.algorithm\n\n # Create monitoring feature set if monitoring found in model endpoint object\n if (\n model_endpoint.spec.monitoring_mode\n == mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled.value\n ):\n monitoring_feature_set = cls.create_monitoring_feature_set(\n model_endpoint, model_obj, db_session, run_db\n )\n # Link model endpoint object to feature set URI\n model_endpoint.status.monitoring_feature_set_uri = (\n monitoring_feature_set.uri\n )\n\n # If feature_stats was either populated by model_uri or by manual input, make sure to keep the names\n # of the features. If feature_names was supplied, replace the names set in feature_stats, otherwise - make\n # sure to keep a clean version of the names\n if model_endpoint.status.feature_stats:\n logger.info(\"Feature stats found, cleaning feature names\")\n if model_endpoint.spec.feature_names:\n # Validate that the length of feature_stats is equal to the length of feature_names and label_names\n cls._validate_length_features_and_labels(model_endpoint=model_endpoint)\n\n # Clean feature names in both feature_stats and feature_names\n (\n model_endpoint.status.feature_stats,\n model_endpoint.spec.feature_names,\n ) = cls._adjust_feature_names_and_stats(model_endpoint=model_endpoint)\n\n logger.info(\n \"Done preparing feature names and stats\",\n feature_names=model_endpoint.spec.feature_names,\n )\n\n # If none of the above was supplied, feature names will be assigned on first contact with the model monitoring\n # system\n logger.info(\"Creating model endpoint\", endpoint_id=model_endpoint.metadata.uid)\n\n # Write the new model endpoint\n model_endpoint_store = get_model_endpoint_store(\n project=model_endpoint.metadata.project,\n secret_provider=mlrun.api.crud.secrets.get_project_secret_provider(\n project=model_endpoint.metadata.project\n ),\n )\n model_endpoint_store.write_model_endpoint(endpoint=model_endpoint.flat_dict())\n\n logger.info(\"Model endpoint created\", endpoint_id=model_endpoint.metadata.uid)\n\n return model_endpoint", "def test_get_model(self) -> None:\n get_model()", "def emit_swagger_spec(ctx, modules, fd, path):\n\n printed_header = False\n model = OrderedDict()\n definitions = OrderedDict()\n augments = list()\n # Go through all modules and extend the model.\n for module in modules:\n if not printed_header:\n model = print_header(module, fd)\n printed_header = True\n path = '/'\n\n typdefs = [module.i_typedefs[element] for element in module.i_typedefs]\n models = list(module.i_groupings.values())\n referenced_types = list()\n referenced_types = findTypedefs(ctx, module, models, referenced_types)\n for element in referenced_types:\n 
typdefs.append(element)\n\n # The attribute definitions are processed and stored in the \"typedefs\" data structure for further use.\n gen_typedefs(typdefs)\n\n # list() needed for python 3 compatibility\n referenced_models = list()\n referenced_models = findModels(ctx, module, models, referenced_models)\n for element in referenced_models:\n models.append(element)\n # Print the swagger definitions of the Yang groupings.\n definitions = gen_model(models, definitions)\n\n # If a model at runtime was dependant of another model which had been encounter yet, it is generated 'a posteriori'.\n if pending_models:\n gen_model(pending_models, definitions)\n\n if PARENT_MODELS:\n for element in PARENT_MODELS:\n if PARENT_MODELS[element]['models']:\n definitions[element]['discriminator'] = PARENT_MODELS[element]['discriminator']\n # extract children which contain data definition keywords\n chs = [ch for ch in module.i_children\n if ch.keyword in (statements.data_definition_keywords + ['rpc','notification'])]\n\n # generate the APIs for all children\n if len(chs) > 0:\n model['paths'] = OrderedDict()\n gen_apis(chs, path, model['paths'], definitions)\n\n model['definitions'] = definitions\n fd.write(json.dumps(model, indent=4, separators=(',', ': ')))", "def detail(self, req):\n return self._get_models(req, is_detail=True)", "def get(self, id=None):\n self.reset(id)\n url = ASSEMBLYAI_URL + '/model/' + str(self.id)\n response = requests.get(url, headers=self.headers)\n self.warning = handle_warnings(response, 'model')\n response = response.json()['model']\n # self.phrases = response['phrases']\n self.dict = response\n self.status = response['status']\n logging.debug('Model %s %s' % (self.id, self.status))\n return self", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'error_entity': 'DomainEntityRef',\n 'related_entity': 'DomainEntityRef',\n 'timestamp': 'datetime',\n 'level': 'str',\n 'category': 'str',\n 'correlation_id': 'str',\n 'event_message': 'EventMessage',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'error_entity': 'errorEntity',\n 'related_entity': 'relatedEntity',\n 'timestamp': 'timestamp',\n 'level': 'level',\n 'category': 'category',\n 'correlation_id': 'correlationId',\n 'event_message': 'eventMessage',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._error_entity = None\n self._related_entity = None\n self._timestamp = None\n self._level = None\n self._category = None\n self._correlation_id = None\n self._event_message = None\n self._self_uri = None", "def GetModel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def test_integration_test_add_model(\n test_input, properties, required, defaults\n):\n with patch_registry() as registry:\n swagger.add_model(test_input)\n\n assert test_input.__name__ in registry[\"models\"]\n assert \"description\" in registry[\"models\"][test_input.__name__]\n assert \"notes\" in registry[\"models\"][test_input.__name__]\n\n if \"resource_fields\" not in dir(test_input) and \"__init__\" not in dir(\n test_input\n ):\n # in py2, classes without __init__ or resource_fields defined\n # will cause issues.\n # note, no issue in PY3.\n pytest.fail(\n \"do not call without resource_fields or __init__ defined.\"\n )\n\n if \"resource_fields\" in dir(test_input):\n if hasattr(test_input, \"required\"):\n assert \"required\" in registry[\"models\"][test_input.__name__]\n elif \"__init__\" in 
dir(test_input):\n assert \"required\" in registry[\"models\"][test_input.__name__]\n\n assert \"properties\" in registry[\"models\"][test_input.__name__]", "def test_basic_api_inline_swagger(self):\n self.create_and_verify_stack(\"single/basic_api_inline_swagger\")\n\n first_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(first_dep_ids), 1)\n\n body = self.get_template_resource_property(\"MyApi\", \"DefinitionBody\")\n body[\"basePath\"] = \"/newDemo\"\n self.set_template_resource_property(\"MyApi\", \"DefinitionBody\", body)\n self.update_stack()\n\n second_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(second_dep_ids), 1)\n\n self.assertEqual(len(set(first_dep_ids).intersection(second_dep_ids)), 0)", "def test_get_model_list():\n with app.test_client() as c:\n response = c.get('/REST/api/v1.0/model_list') \n assert response.status_code == 201", "def test_add_model_init_parsing_args(model_class, required, defaults):\n with patch_registry() as registry, patch_parse_doc(), patch_dir(\n [\"__init__\"]\n ):\n swagger.add_model(model_class)\n\n assert model_class.__name__ in registry[\"models\"]\n assert registry[\"models\"][model_class.__name__][\"required\"] == required\n for key, default_value in defaults:\n _name = model_class.__name__\n assert key in registry[\"models\"][_name][\"properties\"]\n assert (\n default_value\n == registry[\"models\"][_name][\"properties\"][key][\"default\"]\n )", "def model_endpoint():\n if request.method == 'POST':\n # Build a new Kiosk login\n data: object = {\"Name\": \"Fred\", \"authkey\": \"Im a base64 encoded authorization key\"}\n return jsonify(data.__dict__)\n else:\n return \"\"", "def __init__(self, id: int=None, owner: str=None, code: int=None, status: str=None):\n self.swagger_types = {\n 'id': int,\n 'owner': str,\n 'code': int,\n 'status': str\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'owner': 'owner',\n 'code': 'code',\n 'status': 'status'\n }\n\n self._id = id\n self._owner = owner\n self._code = code\n self._status = status", "def to_api_repr(self):\n raise NotImplementedError", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'key': 'str',\n 'display_name': 'str',\n 'description': 'str',\n 'glossary_key': 'str',\n 'parent_term_key': 'str',\n 'is_allowed_to_have_child_terms': 'bool',\n 'path': 'str',\n 'lifecycle_state': 'str',\n 'time_created': 'datetime',\n 'time_updated': 'datetime',\n 'created_by_id': 'str',\n 'updated_by_id': 'str',\n 'owner': 'str',\n 'workflow_status': 'str',\n 'uri': 'str',\n 'associated_object_count': 'int',\n 'associated_objects': 'list[TermAssociatedObject]'\n }\n\n self.attribute_map = {\n 'key': 'key',\n 'display_name': 'displayName',\n 'description': 'description',\n 'glossary_key': 'glossaryKey',\n 'parent_term_key': 'parentTermKey',\n 'is_allowed_to_have_child_terms': 'isAllowedToHaveChildTerms',\n 'path': 'path',\n 'lifecycle_state': 'lifecycleState',\n 'time_created': 'timeCreated',\n 'time_updated': 'timeUpdated',\n 'created_by_id': 'createdById',\n 'updated_by_id': 'updatedById',\n 'owner': 'owner',\n 'workflow_status': 'workflowStatus',\n 'uri': 'uri',\n 'associated_object_count': 'associatedObjectCount',\n 'associated_objects': 'associatedObjects'\n }\n\n self._key = None\n self._display_name = None\n self._description = None\n self._glossary_key = None\n self._parent_term_key = None\n self._is_allowed_to_have_child_terms = None\n self._path = None\n self._lifecycle_state = None\n self._time_created = None\n self._time_updated = None\n self._created_by_id 
= None\n self._updated_by_id = None\n self._owner = None\n self._workflow_status = None\n self._uri = None\n self._associated_object_count = None\n self._associated_objects = None", "def _generate_implicit_api_resource(self):\n return ImplicitHttpApiResource().to_dict()", "def get_model(*args):\n return Model()", "def render_documentation(app_slug: str, model_slug: str):\n context = {\"app_slug\": app_slug, \"model_slug\": model_slug}\n\n app = SLUG_MAP.get(app_slug, None)\n model_choices = (\n [model for model in app.get_models() if model.get_slug() == model_slug]\n if app is not None\n else []\n )\n model = model_choices[0] if len(model_choices) == 1 else None\n\n fields = {}\n if model is not None:\n for field in model.get_open_fields():\n\n relation = None\n if field.is_relation:\n app_slug = slugify(\n get_app_name(\n field.related_model._meta.app_config # pylint: disable=W0212\n )\n )\n relation = {\n \"model\": field.related_model.__name__,\n \"doc_link\": reverse(\n \"documentation:details\", kwargs={\"app_slug\": app_slug}\n ),\n \"model_slug\": field.related_model.get_slug(),\n }\n\n fields[field.name] = {\n \"name\": field.name,\n \"optional\": field.null,\n \"default\": field.default if field.has_default() else None,\n \"help\": convert_string(field.help_text),\n \"type\": field.get_internal_type(),\n \"relation\": relation,\n }\n\n context[\"name\"] = model.__name__\n context[\"module\"] = model.__module__\n context[\"doc\"] = convert_string(model.__doc__, wrap_blocks=True)\n context[\"base\"] = str(model.__base__.__name__)\n # For the rare case where a field name is items, prefer this key val iteration\n context[\"columns\"] = [(key, val) for key, val in fields.items()]\n\n return context", "def get_model():\n return UNISAL", "def __init__(self, id: str=None, label: str=None):\n self.openapi_types = {\n 'id': str,\n 'label': str\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'label': 'label'\n }\n\n self._id = id\n self._label = label" ]
[ "0.6751859", "0.633995", "0.63148445", "0.6260051", "0.6195077", "0.6152982", "0.5982219", "0.59721094", "0.59506464", "0.5930559", "0.5904223", "0.5891059", "0.5891059", "0.5891059", "0.5891059", "0.5891059", "0.58744615", "0.5842159", "0.58389103", "0.5817477", "0.5785608", "0.57788867", "0.57564247", "0.5724594", "0.57239014", "0.5709965", "0.5706333", "0.57058513", "0.569756", "0.5693229", "0.56817025", "0.56653696", "0.565673", "0.56513965", "0.5635958", "0.5615666", "0.5608194", "0.55912924", "0.55676", "0.55651164", "0.55551326", "0.5546613", "0.5522435", "0.55071884", "0.55030906", "0.549178", "0.5453457", "0.5449884", "0.5447271", "0.5441783", "0.5430301", "0.5430301", "0.5430301", "0.5430301", "0.5429765", "0.54283834", "0.5426351", "0.5416934", "0.5416934", "0.5416934", "0.5415847", "0.54150736", "0.5406673", "0.5395721", "0.5395721", "0.53954375", "0.53761995", "0.5373646", "0.5366702", "0.53578395", "0.5346978", "0.53361243", "0.5332102", "0.53288496", "0.5324498", "0.5313588", "0.52965236", "0.529537", "0.5294875", "0.529337", "0.5286082", "0.5278969", "0.5271166", "0.5269775", "0.5251296", "0.5248017", "0.5245588", "0.5236372", "0.5225033", "0.52167445", "0.5209655", "0.5207763", "0.52065337", "0.52045304", "0.5204195", "0.5201296", "0.51878625", "0.5187763", "0.5187171", "0.5185392", "0.51841164" ]
0.0
-1
Sets the caller of this Dial.
def caller(self, caller): self._caller = caller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_scripts_caller(self, caller):\n self._scripts_caller = caller", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def caller_reference(self) -> str:\n return pulumi.get(self, \"caller_reference\")", "def dialstring(self, dialstring):\n\n self._dialstring = dialstring", "def setPeer (self, peer):\n\t\tself.peer = peer", "def call(self, callee: \"SIPPhoneTemplate\") -> None:", "def set_follower(self, follower):\n self.follower = follower", "def referred_by_name(self, referred_by_name: str):\n self._referred_by_name = referred_by_name", "def CALL_addr(self, addr):\n\t\tself.stack[self.SP] = self.IP\n\t\tself.SP += 1\n\t\tself.IP = addr", "def sender(self) -> Address:\n return self._sender", "def sender(self, sender: Address) -> None:\n enforce(\n isinstance(sender, str), f\"Sender must be string. Found '{type(sender)}'\"\n )\n self._sender = sender", "def ping(self, caller):\n if not hasattr(self, \"_ping_callers\"):\n self._ping_callers = []\n self._ping_callers.append(caller)\n super(ServerBot, self).msg(ping=\"\")", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def call_from_contact(self):\n\n log_test_case(self.name, 'call_from_contact')\n #lick_textview_by_text(SC.PRIVATE_CONTACT_NUMBER)\n click_textview_by_id('primary_action_view')\n sleep(1)\n goback()\n sleep(3)\n return", "def sender(self) -> str:\n return self._sender", "def set_owner(self, owner):\n self.__owner = owner", "def owner(self, owner: str):\n\n self._owner = owner", "def sender(self, sender: str):\n if sender is None:\n raise ValueError(\"Invalid value for `sender`, must not be `None`\") # noqa: E501\n\n self._sender = sender", "def salt_caller(self):\n if self._salt_caller is None:\n self._salt_caller = salt.client.Caller()\n return self._salt_caller", "def toggle_call(self) -> None:", "def handle_call(self):\n call_socket, address = self.call_socket.accept()\n print(\"connected call socket: {}\".format(call_socket))\n # gets name of user making the call:\n caller_name = self.receive_mes(call_socket)\n # gets from calling client user they want to call:\n receiver_name = self.receive_mes(call_socket)\n # gets receivers socket from dictionary\n if receiver_name not in self.client_dict:\n print(\"boi bye\")\n sys.exit(EXIT)\n receiver_sock = self.client_dict[receiver_name]\n mes = \"{} is calling you\".format(caller_name)\n self.send_mes(mes.encode(), receiver_sock)\n answer = self.receive_mes(receiver_sock)\n print(\"answer from {}: {}\".format(receiver_name, answer))\n if answer == \"Y\":\n self.send_mes(\"call\".encode(), call_socket)\n self.start_call()\n else:\n self.send_mes(\"no call\".encode(), call_socket)", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def InvocationAddCaller(builder, caller):\n return AddCaller(builder, caller)", "def prompt(self, upstream_name):\n self.prompt_events[upstream_name].set()", "def on(self, o_self):\r\n self.o_self = o_self\r\n return self", "def sender_name(self, sender_name):\n\n self._sender_name = sender_name", "def parent(self, 
value):\n\t\tself._parent = value", "def set_owner(self, owner):\n self.settings[\"owner\"] = owner", "def call(self) -> global___Snippet.ClientCall:", "def call(self) -> global___Snippet.ClientCall:", "def set_receiver(self, receiver):\n self.receiver = receiver", "def _initiate(self, call):\n if not self.gsm_call:\n raise Exception(\"No connectivity\")\n number = str(call.number)\n logger.info(\"initiate call to %s\", number)\n call_id = yield WaitDBus(self.gsm_call.Initiate, number, \"voice\")\n call_id = int(call_id)\n logger.info(\"call id : %d\", call_id)\n self.lines[call_id] = call\n # TODO: mabe not good idea to store this in the call itself,\n # beside, it makes pylint upset.\n call.__id = call_id", "def delegate(self, delegee_gidfile, caller_keyfile, caller_gidfile):\n # get the gid of the object we are delegating\n object_gid = self.get_gid_object()\n object_hrn = object_gid.get_hrn() \n \n # the hrn of the user who will be delegated to\n delegee_gid = GID(filename=delegee_gidfile)\n delegee_hrn = delegee_gid.get_hrn()\n \n #user_key = Keypair(filename=keyfile)\n #user_hrn = self.get_gid_caller().get_hrn()\n subject_string = \"%s delegated to %s\" % (object_hrn, delegee_hrn)\n dcred = Credential(subject=subject_string)\n dcred.set_gid_caller(delegee_gid)\n dcred.set_gid_object(object_gid)\n dcred.set_parent(self)\n dcred.set_expiration(self.get_expiration())\n dcred.set_privileges(self.get_privileges())\n dcred.get_privileges().delegate_all_privileges(True)\n #dcred.set_issuer_keys(keyfile, delegee_gidfile)\n dcred.set_issuer_keys(caller_keyfile, caller_gidfile)\n dcred.encode()\n dcred.sign()\n\n return dcred", "def _set_parent(self, parent):\n self.__parent = parent", "def set_owner(self, owner, is_stream=False):\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner", "def sender(self):\n return self._sender", "def setCurrentUser(self, provider):\n pass", "def _determine_parent(self, caller):\n self.msgin(4, \"determine_parent\", caller)\n\n parent = None\n if caller:\n pname = caller.identifier\n\n if isinstance(caller, Package):\n parent = caller\n\n elif '.' 
in pname:\n pname = pname[:pname.rfind('.')]\n parent = self.findNode(pname)\n\n elif caller.packagepath:\n # XXX: I have no idea why this line\n # is necessary.\n parent = self.findNode(pname)\n\n self.msgout(4, \"determine_parent ->\", parent)\n return parent", "def sender(self) -> str:", "def setCurrentPlayer(self):\n self.current_player = next(self.get_player_order_fn())", "def __init__(self, number=None, **kwargs):\n super(Dial, self).__init__(**kwargs)\n if number:\n self.value = number", "def prompt(self, upstream_name):\n request = PromptRequest(upstream_name=upstream_name)\n response = self.stub.Prompt(request, timeout=5)", "def invoker(self) -> User:\n return self.msg.source", "def __init__(self, caller=None, dialstatus=None, dialstring=None, forward=None, forwarded=None, peer=None): # noqa: E501 # noqa: E501\n\n self._caller = None\n self._dialstatus = None\n self._dialstring = None\n self._forward = None\n self._forwarded = None\n self._peer = None\n self.discriminator = None\n\n if caller is not None:\n self.caller = caller\n self.dialstatus = dialstatus\n if dialstring is not None:\n self.dialstring = dialstring\n if forward is not None:\n self.forward = forward\n if forwarded is not None:\n self.forwarded = forwarded\n self.peer = peer", "def reply_to(self):\n return self.receiver.remote_source.address", "def outgoing_caller_id(self, sid):\r\n return numbers.OutgoingCallerId(self, sid)", "def _set_source(self, source):\n if source != self._source:\n self._source = source\n self._channel = \"\"\n self._channel_name = \"\"\n self._is_forced_val = True\n self._forced_count = 0", "def wm_dial(self):\n return self.get_par(\"dial_readback\")", "def set_referer(self, url):\n self._opener.set_header(('Referer', urlrewrite.get_referer(url)))", "def set_follow(self, follow):\n self.follow = follow", "def place_call_onhold(self) -> None:", "def setParent(self, parent):\n if parent is None:\n self.__parent = None\n else:\n self.__parent = weakref.ref(parent)", "def showsender(self):\n return self.sender", "def setParent(self, parent):\n self.parent = parent", "def set_run_from(self, run_from):\n self._run_from = run_from", "def set_parent(self, parent):\n self.parent = parent", "def set_parent(self, parent):\n self.parent = parent", "def set_parent(self, parent_node):\n self.set_parent = parent_node", "def user_call(self, frame, argument_list):\n pass", "def asCurrent(self):\n InvocationLocal.__currentinvocation__ = self", "def __getattr__(self, item):\n return Caller(self, item)", "def set_return_value(self, args, kwargs, value):\n args = deepcopy(args)\n kwargs = deepcopy(kwargs)\n value = deepcopy(value)\n\n self._seeded_calls.insert(0, ((args, kwargs), value))", "def setparent(self, parent):\n\t\tself._setparent(parent)", "def peer(self, value: Optional[MicrobitPeer]) -> None:\n if self.__peer is not None:\n self.__peer.remove_listener(self.__execute)\n\n if value is not None:\n value.add_listener(self.__execute)\n\n self.__peer = value\n self.__sync_x()\n self.__sync_y()\n self.__sync_z()\n self.__sync_current_gesture()", "async def set_bot():\n\n self = await LOCAL.APP.get_me()\n LOCAL.bot_id = self.id\n LOCAL.bot_name = self.first_name\n LOCAL.bot_username = self.username", "def get_nicklist(self, caller=None):\n if not hasattr(self, \"_nicklist_callers\"):\n self._nicklist_callers = []\n if caller:\n self._nicklist_callers.append(caller)\n super(ServerBot, self).msg(request_nicklist=\"\")\n return", "def possessed_by(self, other):\r\n self.owner = other", "def sender(self):\n l 
= self.link\n if l and l.is_sender:\n return l\n else:\n return None", "def guess_caller(vr):\n if \"source\" in vr.metadata and len(vr.metadata[\"source\"]) == 1:\n # Callers that follow the VCF spec: FreeBayes, pindel\n caller = vr.metadata[\"source\"][0].split(None, 1)[0]\n elif \"GATKCommandLine.MuTect\" in vr.metadata:\n # GATK/SATK 3.4+\n caller = \"MuTect\"\n elif \"GATKCommandLine.HaplotypeCaller\" in vr.metadata:\n caller = \"HaplotypeCaller\"\n elif \"GATKCommandLine.UnifiedGenotyper\" in vr.metadata:\n caller = \"UnifiedGenotyper\"\n elif \"GATKCommandLine\" not in vr.metadata:\n raise ValueError(\"Bad VCF header missing caller info:\\n%s\"\n % vr.metadata)\n else:\n if len(vr.metadata[\"GATKCommandLine\"]) == 2:\n # It's \"private\" to UG vs. HC, via vcf_comp\n caller = \"UnifiedGenotyper\"\n else:\n # GATK tools don't follow the spec\n gatk_info = vr.metadata[\"GATKCommandLine\"]\n assert len(gatk_info) == 1\n ##GATKCommandLine=<ID=UnifiedGenotyper,CommandLineOptions=\"...\n caller = gatk_info[0][\"ID\"]\n return caller", "def _self(self, _self):\n\n self.__self = _self", "def _self(self, _self):\n\n self.__self = _self", "def __init__(self, sender):\r\n\t\tself.sender = sender", "def force_contact(self, *args, **kwargs) -> Any:\n pass", "def set_designator(self, ref):\n self.ref = ref", "def sender(self):\n key, alt = ('Sender', 'From') if not self.resent else \\\n ('Resent-Sender', 'Resent-From')\n value = self.get(key) or self.get(alt)\n _, addr = getaddresses([value])[0]\n return addr", "def SetFather(self, *args):\n return _XCAFDoc.XCAFDoc_GraphNode_SetFather(self, *args)", "def set_current_screen(self, screen):\n\t\tself.current_screen = screen\n\t\tscreen.screen_manager = self", "def show(self, caller_is_main: bool):\n pass", "def set_master_connection(self, master_connection):\n self.master_connection = master_connection\n self.master_connection.parent = self\n self.master_connection.run()", "def InvocationAddCallerAuthid(builder, callerAuthid):\n return AddCallerAuthid(builder, callerAuthid)", "def _set_request(self, request):\n self._request = request", "def kick(self, mask, target, args):\n if not args['<channel>']:\n args['<channel>'] = target\n elif args['<channel>'] and args['<reason>']:\n if not IrcString(args['<channel>']).is_channel:\n args['<reason>'].insert(0, args['<channel>'])\n args['<channel>'] = target\n if not args['<reason>']:\n self.bot.send('KICK %s %s' % (as_channel(args['<channel>']), args['<nick>']))\n else:\n self.bot.send('KICK %s %s :%s' % (as_channel(args['<channel>']), args['<nick>'], ' '.join(args['<reason>'])))", "def set_parent(self, parent):\n self._parent = parent", "def response_host(self, response_host):\n\n self._response_host = response_host", "def response_host(self, response_host):\n\n self._response_host = response_host", "def _set_parent(self, parent):\n assert self._parent == None # implementing reparenting requires more work\n # Set the parent silently to dodge setattr parent handling.\n object.__setattr__(self, '_parent', parent)", "def _set_parent(self, parent):\n assert self._parent == None # implementing reparenting requires more work\n self._parent = parent", "def set_thread_call_context(ctx):\n _local.context = ctx\n return ctx", "def referred_by_name(self) -> str:\n return self._referred_by_name", "def counterparty(self, counterparty):\n\n self._counterparty = counterparty" ]
[ "0.5862553", "0.55642617", "0.55642617", "0.55642617", "0.55642617", "0.55642617", "0.55458647", "0.52504754", "0.52443874", "0.5207483", "0.5065583", "0.5034338", "0.50276506", "0.50047404", "0.5002639", "0.49828595", "0.49691632", "0.49691632", "0.49691632", "0.49691632", "0.4918082", "0.49063438", "0.48871636", "0.48751572", "0.48585597", "0.4850022", "0.48449415", "0.48439252", "0.48348072", "0.48348072", "0.48348072", "0.48348072", "0.48317614", "0.48235056", "0.4808994", "0.47987616", "0.4788401", "0.4783301", "0.47586665", "0.47586665", "0.47572166", "0.47422725", "0.47248638", "0.4722756", "0.4718583", "0.47144857", "0.4702746", "0.47008097", "0.46894193", "0.4683123", "0.46728486", "0.46658778", "0.46603203", "0.46299127", "0.46292567", "0.46161097", "0.45953882", "0.45766807", "0.45649692", "0.45636028", "0.455995", "0.45592052", "0.45578226", "0.45418406", "0.45378938", "0.4537652", "0.4537652", "0.4534272", "0.4532504", "0.45317164", "0.45277396", "0.4525888", "0.45228368", "0.4520891", "0.45186153", "0.45147094", "0.45140722", "0.45040607", "0.44967633", "0.44961676", "0.44961676", "0.44828042", "0.44813082", "0.4479513", "0.4477259", "0.44746178", "0.4474565", "0.44723496", "0.4468718", "0.44676423", "0.446466", "0.44634482", "0.4461694", "0.4461507", "0.4461507", "0.44605172", "0.44537458", "0.44523525", "0.44512287", "0.44483927" ]
0.74337715
0
Sets the dialstatus of this Dial.
def dialstatus(self, dialstatus):
    if dialstatus is None:
        raise ValueError("Invalid value for `dialstatus`, must not be `None`")  # noqa: E501

    self._dialstatus = dialstatus
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def setstatus(self, status):\n with self.lock:\n self.status = status", "def SetStatus(self, status):\r\n self.status = status", "def set_status(self, status: str) -> None:\n\n try:\n self.status = Buddy.status_map[status.lower()]\n except KeyError:\n self.status = status", "def set_vpn_state(self, status):\n if hasattr(self, status):\n self.change_to(getattr(self, status))", "def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def setStatus(self, status):\n self.__status = status", "def status(self, status):\n self._set_property_(self.STATUS, str(status))", "def _set_status(self, status):\n with self.status_lock:\n if (status in _ENDING_STATUSES) or (not self.status in _ENDING_STATUSES):\n self.status = status", "async def set_status(self, ctx, *, status: str = \"online\"):\n\n try:\n status = discord.Status[status.lower()]\n except KeyError:\n await ctx.error(\"Invalid Status\", \"Only `online`, `idle` or `dnd` statuses are available.\")\n else:\n await self.bot.change_presence(status=status, activity=ctx.me.activity)\n await ctx.success(f\"Status changed to {status}.\")", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def set_activity(self, status):\n self._activity = status", "def status(self, status):\n allowed_values = [\"D\", \"P\", \"V\", \"S\", \"M\", \"I\", \"R\", \"C\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def set_status(self, status):\n if status == 'qw':\n status = 'Waiting'\n elif status == 'hqw':\n status = 'Held'\n elif status == 'Eqw':\n status = 'Error'\n else:\n sys.exit(20)\n self.status = status\n return", "def set_on_tunnel(self, status: bool):\n self._is_on_tunnel = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def 
status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n allowed_values = [\"I\", \"A\", \"S\", \"T\", \"D\"]\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status`, must be one of {0}\"\n .format(allowed_values)\n )\n self._status = status", "def set_status(self, status):\n if status == \"offline\":\n self._status.set_message(\"N\")\n self._status.set_foreground_color(\"red\")\n \n elif status == \"online\":\n self._status.set_message(\"Y\")\n self._status.set_foreground_color(\"Green\")\n \n elif status == \"away\":\n self._status.set_message(\"A\")\n self._status.set_foreground_color(\"Grey\")\n \n elif status == \"busy\":\n self._status.set_message(\"B\")\n self._status.set_foreground_color(\"Yellow\")", "def set_device_status(self, door_id: int, door_status: DoorStatus) -> None:\n self._info_data[f\"door{door_id}\"][\"status\"] = door_status.value", "def set_status(self, status: HTTPProxyStatus) -> None:\n self._status = status\n self.update_actor_details(status=self._status)", "def status(self, status: str):\n\n self._status = status", "def status(self, status: str):\n\n self._status = status", "def SetConnectionStatus(self, state, info):\n self.connection_state = state\n self.connection_info = info", "def set_connection_status(self, connection_status: Literal[ConnectionState]) -> None:\n self.connection_status = connection_status\n self.publish(self.key_gen(\"connection_status\"), connection_status)", "def set_status_led_config(self, config):\n self.check_validity()\n\n config = int(config)\n\n self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_STATUS_LED_CONFIG, (config,), 'B', 0, '')", "def change_stepper_status(self, status):\n\n if status:\n GPIO.output(26, GPIO.HIGH)\n else:\n GPIO.output(26, GPIO.LOW)", "def connection_status(self, connection_status):\n allowed_values = [\"Unknown\", \"Success\", \"Failure\"]\n if connection_status not in allowed_values:\n raise ValueError(\n \"Invalid value for `connection_status` ({0}), must be one of {1}\"\n .format(connection_status, allowed_values)\n )\n\n self._connection_status = connection_status", "def update_remediation_status(self, status):\n self.remediation_status = status", "def setEnabled(self, status):\r\n self._status = status\r\n\r\n if status:\r\n self._start()\r\n else:\r\n self._stop()\r\n\r\n for cb in self._statusListener:\r\n cb(self, status)", "def set_status_led_config(self, config):\n config = int(config)\n\n self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_SET_STATUS_LED_CONFIG, (config,), 'B', '')", "def set_remote_status(self, mode):\n status = {\n 0: \"Local and locked\",\n 1: \"Remote and locked\",\n 2: \"Local and unlocked\",\n 3: \"Remote and unlocked\",\n }\n logging.info(__name__ + ' : Setting remote control status to %s' % status.get(mode, \"Unknown\"))\n self._execute('C%s' % mode)", "def set_desired_connection_status(self, connection_status: Literal[ConnectionState]) -> None:\n self.desired_connection_status = connection_status\n self.publish(self.key_gen(\"desired_connection_status\"), connection_status)", "def change_status(self, inf, status):\n self.interfaces[inf]['status'] = status", "def status(self, status: str):\n allowed_values = [\"OPEN\", \"WAITING_RESOLUTION\", \"CONFIRMED\", \"CANCELLED\", \"COMPLETED\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for 
`status` ({0}), must be one of {1}\"\n .format(status, allowed_values)\n )\n\n self._status = status", "def status(self, status):\n allowed_values = [\"C\", \"D\", \"P\", \"I\", \"E\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def status(self, status: int):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n\n self._status = status", "def status(self, status):\n allowed_values = [\"REQUESTED\", \"CREATE_IN_PROGRESS\", \"AVAILABLE\", \"UPDATE_IN_PROGRESS\", \"UPDATE_REQUESTED\", \"UPDATE_FAILED\", \"CREATE_FAILED\", \"ENABLE_SECURITY_FAILED\", \"PRE_DELETE_IN_PROGRESS\", \"DELETE_IN_PROGRESS\", \"DELETE_FAILED\", \"DELETE_COMPLETED\", \"STOPPED\", \"STOP_REQUESTED\", \"START_REQUESTED\", \"STOP_IN_PROGRESS\", \"START_IN_PROGRESS\", \"START_FAILED\", \"STOP_FAILED\", \"WAIT_FOR_SYNC\", \"MAINTENANCE_MODE_ENABLED\"]\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\"\n .format(status, allowed_values)\n )\n\n self._status = status", "def status(self, status):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\")\n allowed_values = [\"success\", \"warning\", \"error\", \"pending\"]\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\"\n .format(status, allowed_values)\n )\n\n self._status = status", "def setstatus(self,statusin):\n if self.status in self.statuserrors:\n print \"Action forbidden.\"\n self.inputstatus.setEntry(self.status)\n return \n if statusin == None: status=self.inputstatus.getEntry()\n else: status=statusin\n if status not in self.statuses:\n print \"Unknown status:\",status\n #self.inputstatus.setEntry(self.status)\n return\n option=status[0]\n cmd=\"setStatus(\"+'\"'+self.detector+'\"'+\",\"+self.inpnumDIM +\",\"+\"'\"+option+\"'\"+\")\"\n output=self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")\n #self.getstatus()", "def set_bonus_health(self, status: bool):\n self._bonus_health = status", "def account_status(self, account_status):\n\n self._account_status = account_status", "def _set_status(self, action, status):\n cmd = \"curl http://{}:{}@{}/{}s.cgi?led={}\".format(self.config['username'],\n self.config['password'],\n self.config['host'],\n action,\n status)\n self.log.info(\"PDU cmd: {}\".format(cmd))\n utils.start_standing_subprocess(cmd)\n time.sleep(10)", "def status(self, status):\n allowed_values = [\"ENABLED\", \"DISABLED\"] # noqa: E501\n if (self._configuration.client_side_validation and\n status not in allowed_values):\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def setStatus(self, newStatus):\n self._status = newStatus", "def status_id(self, status_id):\n\n self._status_id = status_id", "def set_status(self, status):\n if not status == self._status:\n self._status = status\n self.winstance.send_event('State changed to ' + self._status)\n\n self.completed = not self.parent_node.is_job or \\\n self._status == 'COMPLETED'\n\n if self.completed:\n self.publish()\n\n if not self.parent_node.is_job:\n self.failed = False\n else:\n self.failed = self.parent_node.is_job and \\\n (self._status == 'BOOT_FAIL' or\n self._status == 'CANCELLED' or\n self._status == 'FAILED' or\n 
self._status == 'REVOKED' or\n self._status == 'TIMEOUT')", "def id_status(self, id_status):\n self._id_status = id_status", "def set_atom(self, status):\n self._atom = status", "def set_input_status(self, input_status):\n \n self.__input_status = input_status", "async def change_status(self, status: str) -> int:\n data = {'status': str(status)}\n r = await self.request.request(url='https://www.roblox.com/home/updatestatus', method='POST', data=j.dumps(data))\n return r.status_code", "def set_status(self, locked=None, exclusive=None):\n self.locked = locked\n self.exclusive = exclusive", "def set_node_status(self, status):\n self._node.status = status", "async def status(self, ctx, *, status=None):\n # [p]set status <status>\n\n statuses = {\n \"online\": discord.Status.online,\n \"idle\": discord.Status.idle,\n \"dnd\": discord.Status.dnd,\n \"invisible\": discord.Status.invisible\n }\n\n server = ctx.message.server\n\n current_game = server.me.game if server is not None else None\n\n if status is None:\n await self.bot.change_presence(status=discord.Status.online,\n game=current_game)\n await self.bot.say(\"Status reset.\")\n else:\n status = statuses.get(status.lower(), None)\n if status:\n await self.bot.change_presence(status=status,\n game=current_game)\n await self.bot.say(\"Status changed.\")\n else:\n await send_command_help(ctx)", "def status(self, status):\n allowed_values = [\"co\", \"ne\", \"se\", \"vi\", \"si\", \"do\", \"sd\", \"ca\", \"de\", \"ec\", \"es\", \"xp\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def line_status(self, line_status):\n\n self._line_status = line_status", "def status(self, status):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n allowed_values = [\"Open\", \"Claimed\", \"Held\", \"Closed\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def _set_status(self, action, status):\n raise NotImplementedError(\"Base class: cannot be called directly\")", "async def status(self, ctx:utils.Context, status:str):\n\n status_o = getattr(discord.Status, status.lower())\n await self.bot.change_presence(activity=self.bot.guilds[0].me.activity, status=status_o)", "def set_light_status(self, new_light_status):\n if type(new_light_status) != bool:\n self._logger.write(\"Error! new_light_status should be of type bool\")\n try:\n self._light_status = new_light_status\n except Exception as e:\n self._logger.write(\"Error! 
could not set light status\")", "def change_dnd_status(self, userID=None, uuid=None, dndStatus=False):\n if uuid:\n axl_resp = self.connector.service.doChangeDNDStatus(uuid=uuid, dndStatus=dndStatus)\n else:\n axl_resp = self.connector.service.doChangeDNDStatus(userID=userID, dndStatus=dndStatus)\n return serialize_object(axl_resp)[\"return\"]", "def change_status(self):\n message = self.state_frame[0]\n self.on_status_update(message)\n self.state = STATE_READ_LINE", "def status(self, status):\n if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n allowed_values = [\"active\", \"locked\", \"disabled\", \"changepassword\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def updateStatus(self, status):\n pass", "def status(self, status):\n if status is None:\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n if status is not None and len(status) < 1:\n raise ValueError(\"Invalid value for `status`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._status = status", "def change_user_status(self, status, client):\n if self.verify_status(status, client):\n client.set_status(status)\n self.send_message('Estado actualizado exitosamente.', client.get_socket())", "def affection_status_switch_on(self):\n self._affection_status_switch = True", "def status(self, status):\n allowed_values = [\"Pending\", \"Running\", \"Success\", \"Failed\", \"Skipped\", \"SuccessWithWarning\", \"Canceled\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def status(self, status):\n allowed_values = [\"loaned\", \"finished\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\".format(status, allowed_values) # noqa: E501\n )\n\n self._status = status", "def set_status(self, status, comment=None):\n\n self.status_history.create(name=status, comment=comment)\n self.status = status", "def on_status(self, status):\n log.debug(\"Received status: %d\", status.id)" ]
[ "0.6324947", "0.6324947", "0.6324947", "0.62151617", "0.6139082", "0.6040312", "0.6012847", "0.60080004", "0.5966004", "0.59111637", "0.5868122", "0.5861913", "0.58358765", "0.58328503", "0.58328503", "0.58328503", "0.58328503", "0.58328503", "0.58328503", "0.58328503", "0.58151054", "0.5798218", "0.5796215", "0.5787214", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.57835853", "0.5770619", "0.576804", "0.57349557", "0.56946373", "0.56946373", "0.56882924", "0.56718624", "0.5650721", "0.56205964", "0.5601485", "0.55694294", "0.55524033", "0.554096", "0.55270684", "0.5526073", "0.55219823", "0.55116856", "0.54612154", "0.5443988", "0.5428685", "0.53897214", "0.5388218", "0.5373411", "0.53690606", "0.53427786", "0.5312545", "0.5290429", "0.52798563", "0.52788925", "0.5273718", "0.5267636", "0.5258715", "0.5228817", "0.5224759", "0.52087903", "0.51957947", "0.5192397", "0.5169285", "0.5162045", "0.51374865", "0.5131017", "0.5118828", "0.5113478", "0.50786227", "0.5078562", "0.50781775", "0.5071175", "0.5067916", "0.5066444", "0.5063982", "0.50437593", "0.502794", "0.5023358" ]
0.8169267
0
Sets the dialstring of this Dial.
def dialstring(self, dialstring):
    self._dialstring = dialstring
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def string_value(self, string_value):\n\n self._string_value = string_value", "def setString(self, name: unicode, value: unicode) -> None:\n ...", "def setstring(self):\n self._str = 's '+' '.join([self.src, self.start, self.size,\n self.strand, self.srcSize, self.text])+'\\n'", "def set_text( self, a_string ):\n self.a_string_var.set( a_string )", "def set_text( self, a_string ):\n self.a_string_var.set( a_string )", "def set_gift_conversation(self, conversation_string):\r\n self.gift_conversation = conversation_string", "def setInputString(self, inputString):\n assert isinstance(inputString, basestring), \\\n \"Invalid template string!\"\n\n self.__inputString = inputString", "def add_string(self, str):\n self.__add_source_data(str)", "def measurement_unit_string(self, measurement_unit_string):\n\n self._measurement_unit_string = measurement_unit_string", "def query_str(self, new_query_str):\n self.query_buffer.text = new_query_str", "def dialstatus(self, dialstatus):\n if dialstatus is None:\n raise ValueError(\"Invalid value for `dialstatus`, must not be `None`\") # noqa: E501\n\n self._dialstatus = dialstatus", "def setHint( self, hint ):\n self._urlEdit.setHint(hint)", "def setiddname(self, iddname, testing=False):\n self.iddname = iddname\n self.idd_info = None\n self.block = None", "def custom_string(self, custom_string):\n\n self._custom_string = custom_string", "def __init__(self, string):\n self.string = string", "def set_raw_string(self, string, length):\n if len(string) != length:\n raise ValueError('Length of passed string does not match length')\n self.originstring = string\n self.stringlength = length", "def set_dispute_contact_state(self, state):\n if state == \"\":\n state = self.random_string_generator(6, string.ascii_uppercase)\n self.set_value_into_input_field(self.dispute_contact_state_textbox_locator, state)", "def 置项目文本(self, n, string): # real signature unknown; restored from __doc__\n self.SetString(n, string)", "def set_response(self, response_str):\r\n input_css = \"textarea.short-form-response\"\r\n self.q(css=input_css).fill(response_str)", "def __init__(self, number=None, **kwargs):\n super(Dial, self).__init__(**kwargs)\n if number:\n self.value = number", "def set_adapter_name(self, sAdapterName):\n\t\tcall_sdk_function('PrlVirtNet_SetAdapterName', self.handle, sAdapterName)", "def sendString(self,string):\n\t\tif self.outChannel==None:\n\t\t\traise Exception, \"before calling sendString() on this EpicsAsysnSerialInterface object first call configure() to open the epics channels\"\n\t\tself.outChannel.caput(string)", "def setSnr(tel, snr):\n simuConfig[\"SNRS\"] = snr", "def set_stock_sym_append_str(self, append_str):\n self.com_data_stock_portion_additional_url = append_str", "def dial(address: str, network: Optional[str]=None):\n return NotImplementedError()", "def messier_name(self, messier_name):\n\n self._messier_name = messier_name", "def setModemInitString(self, initString, unitCode=0):\n resp = self.XAPCommand('MINIT', initString, unitCode=unitCode)\n return resp", "def addstr(self,name,string):\n\t\tself.windows[name].addstr(string)", "def set_stock_sym_append_str(self, append_str):\n self.cur_quotes_stock_portion_additional_url = append_str", "def setName(self, name):\n self.name = str(name)", "def setDrone(self, newDrone):\n self._drone = newDrone", "def from_string(cls, dlstr):\n raise NotImplementedError(\"Should be implemented by subclass\")", "def fromString(cls, string):\n raise NotImplementedError(\n 'fromString is not implemented on %r' 
% (cls.__name__,))", "def __init__(self, string: str):\r\n self.string = string", "def store_string(self, string: str) -> None:", "def set_bandname(self,value):\n if value is not None:\n if type(value) != str and type(value) != np.string_:\n raise TypeError(\"The bandname must be a string\", type(value))\n \n self._properties[\"bandname\"] = value", "def from_string(cls, dlstr):\n\n NotImplementedError(\"Should be implemented by subclass\")", "def set_designator(self, ref):\n self.ref = ref", "def setConnector(self, connector):\n self.connector = connector.lower()\n if not (self.connector in self.connectorStrings.keys()):\n raise InvalidConnectorException", "def s(self, s):\n\n self._s = s", "def set_cd(self, cd):\n self.__cd = cd", "def set_iri(self, iri):\n try:\n self.uri = self.iri_to_uri(iri)\n except (ValueError, UnicodeError), why:\n self.http_error = httperr.UrlError(why[0])\n return\n if not re.match(\"^\\s*%s\\s*$\" % URI, self.uri, re.VERBOSE):\n self.add_note('uri', rs.URI_BAD_SYNTAX)\n if '#' in self.uri:\n # chop off the fragment\n self.uri = self.uri[:self.uri.index('#')]\n if len(self.uri) > MAX_URI:\n self.add_note('uri',\n rs.URI_TOO_LONG,\n uri_len=f_num(len(self.uri))\n )", "def write(self, string):\r\n string = string.strip()\r\n if string != \"\":\r\n self.gox.signal_debug(self, string)", "def add_string(self, name, **kwargs):\n self.add(Flags.StringFlag(name, **kwargs))", "def state(self, state: str):\n\n self._state = state", "def state(self, state: str) -> None:\n self._state = state", "def register_string(self,string):\n return self.registry.register_string(string)", "def diet_label(self, diet_label):\n\n self._diet_label = diet_label", "def __init__(self, string: str) -> None:\r\n self.string = string", "def stringbuilderexpr(self, stringbuilderexpr) :\n\t\ttry :\n\t\t\tself._stringbuilderexpr = stringbuilderexpr\n\t\texcept Exception as e:\n\t\t\traise e", "def data_type_string(self, data_type_string):\n\n self._data_type_string = data_type_string", "def state_option(self, byte):\n if byte in telnet_opts:\n self.telnet_cmd.append(byte)\n self.handle_telnet_cmd(self.telnet_cmd)\n self.next_fn = self.state_text", "def dn(self, dn):\n\n self._dn = dn", "def telephone(self, telephone: str):\n\n self._telephone = telephone", "def set_proxy_unsafe(self, proxy_string):\n\t\tself.__proxy = proxy_string", "def SetLabel(self, s):\r\n\r\n self.label = s", "def set_uri(self, uri):\n self.__uri = uri", "def set(self, **kwargs: Dict[str, Any]): # bad naming\n warnings.warn(\"set() is deprecated; use connect().\", DeprecationWarning)\n self.connect(**kwargs)", "def wm_string_dial(self):\n return \"{1:{0}}\".format(self._prec(), self.wm_dial())", "def state(self, state):\n # type: (string_types) -> None\n\n if state is not None:\n if not isinstance(state, string_types):\n raise TypeError(\"Invalid type for `state`, type has to be `string_types`\")\n\n self._state = state", "def add_comment(self, string):\n if self.comment is None:\n self.comment = string\n else:\n self.comment = self.comment.rstrip() + \"\\n \" + string", "def uri(self, uri):\n self._uri = uri", "def uri(self, uri):\n self._uri = uri", "def set_uri(self, uri):\r\n self.uri = uri", "def set_nick(self, nick):\n raise NotImplementedError", "def set_lyrics(self, lyrics: str) -> None:\n self.lyrics = lyrics", "def __init__(self, uri, **kwargs):\n super(Sip, self).__init__(**kwargs)\n self.value = uri", "def setSNR(self, SNR):\n \n self.SNR = SNR", "def uri(self, uri):\n\n self._uri = uri", "def uri(self, uri):\n\n 
self._uri = uri", "def text(self, str):\n self.__r.t._setText(_to_unicode(str))", "def setName(self, *args):\n return _libsbml.Port_setName(self, *args)", "def set_comment(self, comment):\n self.comment_text = str(comment)", "def auth_uri(self, auth_uri):\n\n self._auth_uri = auth_uri", "def setDebugError(self, errStr):\n if self.ioLoopInst is not None:\n cmd = {'cmd': 'debugError', 'error': errStr}\n self._sendMessageToWeb(cmd)\n else:\n print(\"DebugError: \" + errStr)", "def contact_name(self, contact_name):\n\n self._contact_name = contact_name", "def contact_name(self, contact_name):\n\n self._contact_name = contact_name", "def 置现行选中项文本(self, string): # real signature unknown; restored from __doc__\n return self.SetStringSelection(string)", "def dial(phone_number):\n call = client.calls.create(\n to='+{}'.format(phone_number),\n from_=twilio_phone_number,\n url=twiml_instructions_url,\n )\n print(call.sid)\n return \"dialing +{}. call SID is: {}\".format(phone_number, call.sid)", "def set_repin(self, repin):\n self.repin = repin", "def getString(self):\n print \"Enter String value:\",\n self.string = raw_input()", "def set_formatstring(self, formatstring):\n self._logger_formatstring = formatstring\n\n # 각 핸들러에 포매터를 지정한다.\n self._logger_file_handler.setFormatter(self._Logger_formatstring)\n self._logger_stream_handler.setFormatter(self._Logger_formatstring)", "def type(self, string):\n\n\t\tself._interface.type(string)", "def __init__(self, string):\n Rule.__init__(self)\n self.__string = string", "def challenge_name(self, challenge_name):\n\n self._challenge_name = challenge_name", "def command(self, string):\n #Remove beginning of string\n string = string[8:]\n #Find whitespace after command name and assign it to cmd\n index = string.index(\" \")\n cmd = string[:index]\n #Remove command from string\n string = string[index + 1:]\n\n if cmd == \"print\":\n self.print(string)\n elif cmd == \"open\":\n self.open(string)\n elif cmd == \"show\":\n print(string)\n self.show()\n elif cmd == \"write\":\n self.write(string)\n elif cmd == \"rename\":\n self.rename(string)\n elif cmd == \"find\":\n self.find(string)\n elif cmd == \"where\":\n self.where_am_i()\n elif cmd == \"new\":\n self.new_folder(string, index)\n elif cmd == \"clear\":\n self.clear_history()\n elif cmd == \"go\":\n self.go(string)\n elif cmd == \"copy\":\n self.copy(string)", "def set_name(self,name):\n if not isinstance(name,(str)):\n raise TypeError('name must be string')\n else:\n self._name = name", "def s(self, s) :\n\t\ttry :\n\t\t\tself._s = s\n\t\texcept Exception as e:\n\t\t\traise e", "def reset(self):\n self.string = self.axiom", "def set_address(self, address):\n pass", "def set_answer(self, part, answer):\n if isinstance(answer, str):\n self.answers[part] = answer\n else:\n try:\n self.answers[part] = \" \".join(map(repr, answer))\n except TypeError:\n self.answers[part] = repr(answer)", "def facility_name(self, facility_name):\n\n self._facility_name = facility_name", "def repaid(self, repaid):\n\n self._repaid = repaid", "def repaid(self, repaid):\n\n self._repaid = repaid", "def crf_name(self, crf_name):\n\n self._crf_name = crf_name", "def __init__(self):\n self.string = None", "def setName(self, name):\n\t\tself.label.setText(name)", "def reference(self, new_reference):\n\n # Check a type of 'new_reference' parametr\n if not isinstance(new_reference, basestring):\n raise TypeError('string type expected')\n self._reference = new_reference", "def setName(self, name): \n\n self._name = name", "def 
set_option_skip_url_string(self, string, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/setOptionSkipURLString/', {'String': string, 'apikey': apikey})))" ]
[ "0.5384727", "0.5367918", "0.52513164", "0.5229012", "0.5229012", "0.51429296", "0.4930052", "0.49152938", "0.48876902", "0.48603123", "0.48325157", "0.47601306", "0.47166997", "0.46935034", "0.46845663", "0.46628696", "0.46405885", "0.4633442", "0.4625754", "0.46073928", "0.46067485", "0.45958725", "0.45811033", "0.4579079", "0.45759702", "0.45693684", "0.4563997", "0.45204884", "0.4519031", "0.45180133", "0.45149353", "0.4511215", "0.4508206", "0.4497451", "0.44493866", "0.4445668", "0.44427666", "0.44425538", "0.44410694", "0.44383478", "0.4431584", "0.44306982", "0.4427242", "0.4420787", "0.44103217", "0.44040433", "0.44028178", "0.43860325", "0.43812418", "0.43780193", "0.43737313", "0.437187", "0.43631947", "0.4351189", "0.43440408", "0.43423635", "0.43389672", "0.4338813", "0.4336371", "0.43109018", "0.43062496", "0.43033147", "0.43033147", "0.43007502", "0.42951643", "0.42835623", "0.42830163", "0.42824537", "0.42819908", "0.42819908", "0.4276925", "0.42708465", "0.42535433", "0.4248504", "0.424571", "0.4244834", "0.4244834", "0.4230783", "0.42283213", "0.4227479", "0.42222542", "0.4219107", "0.42182022", "0.4216207", "0.4210025", "0.42072073", "0.42023826", "0.41984987", "0.4189196", "0.41891238", "0.41856095", "0.4185434", "0.4176784", "0.4176784", "0.41751558", "0.41733658", "0.41719007", "0.41709736", "0.4170724", "0.41690376" ]
0.8753864
0
Sets the forward of this Dial.
def forward(self, forward):
    self._forward = forward
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def fastforward(self):\n self.run_command('fastforward')", "def forward(self):\n self.cursor.forward()", "def forward(self):\n pass", "def forward(self):\n pass", "def go_forward(self):\n command = _build_robovac_command(RobovacModes.GO_FORWARD, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)", "def forwarded(self, forwarded):\n\n self._forwarded = forwarded", "def forward(self):\n self.position += 1", "def set_forward_trig(self, trig):\n\t\tself.forward_trig = trig", "def forward(self, speed):\n self.controller.forward(speed)", "def forward(self) -> None:\n self.system.notify(\"Jarvis::Forward\")\n self.media.fast_forward()", "def move_forward(self, dist):\r\n self.send_command_without_response(f'forward {dist}')", "def drive_forward(self):\n print(f\"{self.make.title()} is now driving forward.\")", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self, *args: Any, **kwargs: Any) -> None:\n self._check_for_increment(\"forward\")\n return self[-1](*args, **kwargs)", "def start_forward(self, velocity=VELOCITY):\n action = StartForward(velocity=velocity)\n self._velocity_control_client(pickle.dumps(action))", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, True)\n GPIO.output(11, False)\n GPIO.output(13, True)\n GPIO.output(15, False)\n # time.sleep(sec)\n motor_direction = 'Forward'\n return motor_direction", "def forward(self,distance):\n assert (type(distance) in [int, float]), \"parameter distance:%s is not a valid number\" % `distance`\n self._turtle.forward(distance)", "def forward(self, *args, **kwargs):\n pass", "def forward(self, x):\n pass", "def fastforward(self, /, noerror=False):\n\t\tif not self in _running:\n\t\t\tif noerror: return\n\t\t\traise RuntimeError('Not running')\n\t\t_running.remove(self)\n\t\t_anim_stopped(self)\n\t\tfor attr in self._end:\n\t\t\tsep = attr.split('__')\n\t\t\tsubtarget, subattr = eval('.'.join(['self.target']+sep[:-1])), sep[-1]\n\t\t\tsetattr(subtarget, subattr, self._end[attr])", "def move_forward(self, distance):\r\n return self.move('forward', distance)", "def forward(\n self\n ) -> None:\n if not self._forward_page_history_stack:\n # Do nothing if there is no forward page history.\n return\n\n self._back_page_history_stack.append(self._current_page)\n self._current_page = self._forward_page_history_stack.pop()", "def base_forward(self, x):\r\n pass", "def set_port_forward_list(self, nPortFwdType, hPortFwdList):\n\t\tcall_sdk_function('PrlVirtNet_SetPortForwardList', self.handle, nPortFwdType, conv_handle_arg(hPortFwdList))", "def left_forward(self):\n self.left_motor.run_forever(speed_sp=self.MAX_SPEED)", "def move_forward(self):\n self.x, self.y = self.compute_positions()", "def move_forward(power):\n message = \"FORWARD:\" + str(power) + '\\n'\n sock.sendall(message)\n return", "def move_forward():\n pass", "def forward(self, x, mask):\n \"Follow Figure 1 for connections.\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)", "def forward(self, distance):\n self.logger.debug(\"forward \" + 
str(distance))", "def __window_forward(self):\n pass", "def forward(self, x, **kwargs):\n pass", "def forward_pe(self, forward_pe: float):\n\n self._forward_pe = forward_pe", "def set_follow(self, follow):\n self.follow = follow", "def forward_left(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed) \n self.pwm_right.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(100)", "def set_next(self, next_layer):\n self.next_layer = next_layer", "async def skip_forward(self) -> None:\n return await self.relay(\"skip_forward\")()", "def _forward(self, z):\n raise NotImplementedError(\"Forward shouldn't be called!\")", "def forwarder(self, forwarder: ICNForwarder):\n self._forwarder = forwarder", "def forward_pass(self):", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def turn_on(self, **kwargs):\n self.enabled = self.fritz_box.set_call_forwarding(self.uid, 1)", "def move_forward(self, distance):\n quad_offset = self.quad_offset_mapping['forward']\n client.moveByVelocityAsync(self.velocity * quad_offset[0], self.velocity * quad_offset[1],\n 0.15, distance/self.velocity).join()\n # if self.logging:\n # self.log_arr.append(\"forward\")", "def forward(faster, fastest):\n roku_master.forward()\n if fastest:\n roku_master.forward()\n roku_master.forward()\n elif faster:\n roku_master.forward()", "def create_forward(self):\n return MessageCreateForwardRequestBuilder(self.append_to_request_url(\"createForward\"), self._client)", "def moveForward(self):\n if self.onGround:\n self.vx = 4", "def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def forward(self)->None:", "def _goForward(self) -> None:\n if self._stackIndex < -1:\n self._stackIndex += 1\n self._openPath(path=self._pathStack[self._stackIndex], ignoreStack=True)", "def move_forward(self, speed):\n\n # Clamp the speed\n speed = clamp(delta_unit(speed), 0, delta_unit(Car.max_speed))\n\n # Appends the speed according to the direction\n rad = np.radians(self.direction)\n self.fx += speed * np.cos(rad)\n self.fy += speed * np.sin(rad)\n\n # Set marker to move\n self.moved = True", "def forward_to(self):\n if \"forwardTo\" in self._prop_dict:\n if isinstance(self._prop_dict[\"forwardTo\"], OneDriveObjectBase):\n return self._prop_dict[\"forwardTo\"]\n else :\n self._prop_dict[\"forwardTo\"] = Recipient(self._prop_dict[\"forwardTo\"])\n return self._prop_dict[\"forwardTo\"]\n\n return None", "def forward(self, speed=1, **kwargs):\n curve_left = kwargs.pop('curve_left', 0)\n curve_right = kwargs.pop('curve_right', 0)\n if kwargs:\n raise TypeError('unexpected argument %s' % kwargs.popitem()[0])\n if not 0 <= curve_left <= 1:\n raise ValueError('curve_left must be between 0 and 1')\n if not 0 <= curve_right <= 1:\n raise ValueError('curve_right must be between 0 and 1')\n if curve_left != 0 and curve_right != 0:\n raise ValueError(\"curve_left and curve_right can't be used at \"\n \"the same time\")\n self.left_motor.forward(speed * (1 - curve_left))\n self.right_motor.forward(speed * (1 - curve_right))", "def set_direction(self, direction: str) -> None:\n if direction == \"forward\":\n self._bond.setDirection(self._deviceId, Directions.FORWARD)\n elif direction == \"reverse\":\n self._bond.setDirection(self._deviceId, Directions.REVERSE)\n self._attributes['current_direction'] = direction", "def step_forward(self):", 
"def goForward(current, bck, fwd):\n if fwd.size() < 1:\n print(\"Cannot go forward.\")\n else:\n bck.push(current)\n current = fwd.pop()\n return current", "def forward_button(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=int(left_speed))\n self.right_motor.run_forever(speed_sp=int(right_speed))", "def step_forward(self):\n self.read_value = self.write_value\n self.write_value = None", "def forward(self, *args, **kwargs):\n\n raise NotImplementedError()", "def forward(self, x):\n return x", "def GET_forward(self):\n self.roomba.DriveStraight(pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(pyrobot.VELOCITY_FAST)", "def forward(self, *args, **kwargs):\n raise NotImplementedError", "def move_forward(self):\n self.pos += 1\n if self.pos > len(self.text) - 1:\n self.current_char = None\n else:\n self.current_char = self.text[self.pos]", "def forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n raise NotImplementedError", "async def async_set_direction(self, direction: str) -> None:\n if direction == DIRECTION_FORWARD:\n self._device.fan_dir = SENSEME_DIRECTION_FORWARD\n else:\n self._device.fan_dir = SENSEME_DIRECTION_REVERSE", "def move_forward():\n twister = Twist(linear=Vector3(x=0.5,y=0,z=0),angular=Vector3(x=0,y=0,z=0))\n pub.publish(twister)", "def forward(self, x):\n self.x = x\n return x.clamp(min=0)", "def _historyForwardClickedSlot(self):\r\n\r\n steps, success = self._controller.forwardAction.data().toInt()\r\n if success:\r\n self._controller.model.relativeHistoryIndex = steps", "def forward(self, *args):\n raise NotImplementedError", "def forward(self, *args):\n raise NotImplementedError", "def forward(speed, bias, biasDir):\n\t# todo: check directions for me please\n\tif biasDir == 1:\n rightMotor.run_direct(duty_cycle_sp=speed+bias)\n leftMotor.run_direct(duty_cycle_sp=speed)\n elif biasDir == -1:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed+bias)\n else:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed)", "def right_forward(self):\n self.right_motor.run_forever(speed_sp=self.MAX_SPEED)", "def moved_forward(self, distance: \"moves in facing direction\") -> Position:\n newx = self.x + distance * math.cos(self.facing)\n newy = self.y + distance * math.sin(self.facing)\n return Position(newx, newy, self.facing)", "def set_direction(self, direction: str) -> None:\n self.wink.set_fan_direction(direction)", "def left_forward(self, state, speed):\n if state:\n self.left_motor.run_forever(speed_sp=speed)\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n else:\n self.left_motor.stop()\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.BLACK)", "def forward(self, request, forward, times=None):\n data = {\n 'httpRequest': request.dict(),\n 'httpForward': forward.dict(),\n 'times': {\n 'remainingTimes': 1,\n 'unlimited': True\n }\n }\n if times:\n data['times'] = vars(times)\n req = requests.put('{}/expectation'.format(self._get_url()),\n json.dumps(data))\n return req", "def _forward(self, x, X, upto=None):\n if upto is not None: # cannot use 'if upto' here since it is 0-indexed\n # and layer0 is the first layer\n assert 0<=upto<=self._layer_counter\n counter = upto + 1\n else: counter = self._layer_counter\n\n y_previous, Y_previous = x, X\n # TODO: because we always need to compute F_i(X) at each layer i, this\n # is a huge overhead\n # feedforward\n for i in range(counter):\n layer = getattr(self, 'layer'+str(i))\n y, Y = layer(y_previous, 
Y_previous), layer(Y_previous, Y_previous)\n y_previous, Y_previous = y, Y\n\n return y", "def drive_forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)", "def get_forward_position(self):\n return self._forward_position", "def startAcceleratingForward(self,event):\n self.isAcceleratingForward=True", "def goForward(self):\r\n if self.currLoc + 1 < len(self.history):\r\n self.currLoc += 1\r\n return self.history[self.currLoc]", "def forward_train(self, *args, **kwargs):\n return self.detector.forward_train(*args, **kwargs)", "def __post_init__(self):\n if self.steering == Direction.FWD:\n raise ValueError(\"Steering can't be FORWARD.\")", "def _forward(self, X, **kwargs):\n raise NotImplementedError()", "def go_forward(self):\n if not self._items:\n raise ValueError(\"Empty navigation history\")\n\n if self.can_go_forward():\n self._pointer += 1\n return self._items[self._pointer]", "def fastforward(self, /, noerror=False):\n\t\tif not self in _running:\n\t\t\tif noerror: return\n\t\t\traise RuntimeError('Not running')\n\t\tfor anim in self.animations:\n\t\t\tanim.fastforward(noerror=True)\n\t\t_running.remove(self)\n\t\t_anim_stopped(self)", "def fastforward(self, /, noerror=False):\n\t\tif not self in _running:\n\t\t\tif noerror: return\n\t\t\traise RuntimeError('Not running')\n\t\tfor anim in self.animations:\n\t\t\tanim.fastforward(noerror=True)\n\t\t_running.remove(self)\n\t\t_anim_stopped(self)", "def up(self):\n self.forward(MOVE_DISTANCE)", "def setPrev(self, prev_half_edge):\n self.prev = prev_half_edge", "def forward(self, **kwargs):\n return self.a", "def forward(self, x):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\toutput = self._layers[0].forward(x)\n\t\tfor i in range(1, len(self._layers)):\n\t\t\toutput = self._layers[i].forward(output)\n\t\treturn output\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def _set_state_after_forward(self):\n for chunk_id, chunk in self.client.chunk_list.generate_chunk():\n if (\n chunk.get_state() == ChunkState.HOLD\n or chunk.get_state() == ChunkState.HOLD_AFTER_FWD\n ):\n chunk.set_unused()\n self.client.set_all_tensors_state_in_chunk(chunk_id, TensorState.HOLD)", "def move_forward(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.anticlockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.clockwise_rotate(speed + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.anticlockwise_rotate(speed + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.clockwise_rotate(speed)", "def forward(self, dist):\n start = (self.pos_x, self.pos_y)\n self.pos_x += dist * math.cos(math.radians(self.angle))\n self.pos_y += dist * math.sin(math.radians(self.angle))\n self._update_limits()\n end = (self.pos_x, self.pos_y)\n if self.pen_down:\n self.draw.line([start, end], fill=self.colour, width=self.width)", "def left_backward(self):\n self.left_motor.run_forever(speed_sp=-self.MAX_SPEED)" ]
[ "0.6834646", "0.67426616", "0.6636892", "0.64269316", "0.64269316", "0.6326287", "0.62784827", "0.62357396", "0.62272525", "0.62225056", "0.6159911", "0.6145522", "0.6091365", "0.6059019", "0.6059019", "0.6059019", "0.60449684", "0.60174173", "0.5994386", "0.5994386", "0.59784514", "0.59499764", "0.59146667", "0.58958226", "0.5845093", "0.581511", "0.5811764", "0.5791648", "0.57873344", "0.57249874", "0.56852454", "0.5641092", "0.5637933", "0.56339556", "0.5633697", "0.56237936", "0.56236285", "0.5542217", "0.55210733", "0.5452389", "0.54416823", "0.54408145", "0.54365814", "0.54351413", "0.542907", "0.54252315", "0.54236746", "0.5418665", "0.54133475", "0.53994584", "0.5390599", "0.53858864", "0.5382688", "0.5380147", "0.53751826", "0.5371306", "0.5354813", "0.535335", "0.53291726", "0.5294724", "0.5270807", "0.5255576", "0.52532387", "0.5236298", "0.5234797", "0.52344203", "0.52322006", "0.5219127", "0.5219127", "0.5207048", "0.5206857", "0.52056724", "0.5199517", "0.5197356", "0.5197356", "0.5191824", "0.51711106", "0.51552486", "0.51490074", "0.5123482", "0.51203126", "0.51185334", "0.5116771", "0.51105815", "0.5109347", "0.5106462", "0.5102059", "0.51007193", "0.50987107", "0.5092955", "0.5090999", "0.5090999", "0.5084155", "0.5080747", "0.5062044", "0.5056712", "0.50556993", "0.5053051", "0.5049966", "0.5048532" ]
0.7644515
0
Sets the forwarded of this Dial.
def forwarded(self, forwarded):
    self._forwarded = forwarded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, forward):\n\n self._forward = forward", "def forwarder(self, forwarder: ICNForwarder):\n self._forwarder = forwarder", "def forward(self):\n pass", "def forward(self):\n pass", "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def forward(self, *args, **kwargs):\n pass", "def forward_pass(self):", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def fastforward(self):\n self.run_command('fastforward')", "def forward(self, *args, **kwargs) -> Dict[str, Any]:\n pass", "def forward_to(self):\n if \"forwardTo\" in self._prop_dict:\n if isinstance(self._prop_dict[\"forwardTo\"], OneDriveObjectBase):\n return self._prop_dict[\"forwardTo\"]\n else :\n self._prop_dict[\"forwardTo\"] = Recipient(self._prop_dict[\"forwardTo\"])\n return self._prop_dict[\"forwardTo\"]\n\n return None", "def set_port_forward_list(self, nPortFwdType, hPortFwdList):\n\t\tcall_sdk_function('PrlVirtNet_SetPortForwardList', self.handle, nPortFwdType, conv_handle_arg(hPortFwdList))", "def set_forward_trig(self, trig):\n\t\tself.forward_trig = trig", "def set_forwarded_remote_consul_once(self, set_to=True):\n self.FORWARDED_CONSUL_ONCE_ALREADY = set_to", "def forward(self, distance):\n self.logger.debug(\"forward \" + str(distance))", "def forwarder(self) -> ICNForwarder:\n return self._forwarder", "def forward(self, x):\n pass", "def forward(self, x, mask):\n \"Follow Figure 1 for connections.\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)", "def move_forward(self, dist):\r\n self.send_command_without_response(f'forward {dist}')", "def base_forward(self, x):\r\n pass", "def forward(self, x, **kwargs):\n pass", "def forward(self):\n self.cursor.forward()", "def forward(self, request, forward, times=None):\n data = {\n 'httpRequest': request.dict(),\n 'httpForward': forward.dict(),\n 'times': {\n 'remainingTimes': 1,\n 'unlimited': True\n }\n }\n if times:\n data['times'] = vars(times)\n req = requests.put('{}/expectation'.format(self._get_url()),\n json.dumps(data))\n return req", "def turn_on(self, **kwargs):\n self.enabled = self.fritz_box.set_call_forwarding(self.uid, 1)", "def forward(self, *args, **kwargs):\n\n raise NotImplementedError()", "async def skip_forward(self) -> None:\n return await self.relay(\"skip_forward\")()", "def forward(self, *args: Any, **kwargs: Any) -> None:\n self._check_for_increment(\"forward\")\n return self[-1](*args, **kwargs)", "def forward(self, *args, **kwargs):\n raise NotImplementedError", "def forward(self)->None:", "def set_direct(self, direct):\n self._direct = direct", "def set_proxy(self):", "def __window_forward(self):\n pass", "def forward(self, **kwargs):\n return self.a", "def __post_init__(self):\n if self.steering == Direction.FWD:\n raise ValueError(\"Steering can't be FORWARD.\")", "def forward(self, *args):\n raise NotImplementedError", "def forward(self, *args):\n raise NotImplementedError", "def set_follow(self, follow):\n self.follow = follow", "def _forward(self, z):\n raise NotImplementedError(\"Forward shouldn't be called!\")", "def forward(self) -> None:\n self.system.notify(\"Jarvis::Forward\")\n self.media.fast_forward()", "def local_forward(\n self, remote_host, remote_port, local_host=\"0.0.0.0\", local_port=44556\n ):\n tunnel = SSHTunnelForwarder(\n (self.hostname, self.port),\n ssh_username=self.user,\n ssh_pkey=get_pkey(self.issho_conf[\"ID_RSA\"]),\n 
remote_bind_address=(remote_host, remote_port),\n local_bind_address=(local_host, local_port),\n )\n tunnel.start()\n return tunnel", "def create_mock_expectation_with_http_forward(self, request, forward, count=1, unlimited=True):\n data = {}\n data['httpRequest'] = request\n data['httpOverrideForwardedRequest'] = forward\n data['times'] = {'remainingTimes': int(count), 'unlimited': unlimited}\n\n self.create_mock_expectation_with_data(data)", "def go_forward(self):\n command = _build_robovac_command(RobovacModes.GO_FORWARD, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)", "def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)", "def _forward(self, X, **kwargs):\n raise NotImplementedError()", "def setDirect(self, direct):\n self._direct = direct", "def drive_forward(self):\n print(f\"{self.make.title()} is now driving forward.\")", "def proxy_user(self, proxy_user):\n\n self._proxy_user = proxy_user", "def setHook(self, module):\n self.hook = module.register_forward_hook(self.hook_fn)", "def proxydescriptor(self, value: ProxyDescriptor):\n self._proxydescriptor = value", "def set_incoming_port(self, nIncomingPort):\n\t\tcall_sdk_function('PrlPortFwd_SetIncomingPort', self.handle, nIncomingPort)", "def forward(self, addr, display='full', format='json', lang=None):\n\n params = {\n 'addr': addr,\n 'display': display,\n 'format': format,\n 'lang': lang or self.lang,\n }\n\n return self._request('/forward', params)", "def set_direction(self, direction: str) -> None:\n self.wink.set_fan_direction(direction)", "def update(self):\n _LOGGER.debug(self)\n self.enabled = self.fritz_box.get_call_forwarding_status_by_uid(\n self.uid)", "def forward(self, a):\n\n return a", "async def async_set_direction(self, direction: str) -> None:\n if direction == DIRECTION_FORWARD:\n self._device.fan_dir = SENSEME_DIRECTION_FORWARD\n else:\n self._device.fan_dir = SENSEME_DIRECTION_REVERSE", "def forward(self,distance):\n assert (type(distance) in [int, float]), \"parameter distance:%s is not a valid number\" % `distance`\n self._turtle.forward(distance)", "def switch_proxy(self, proxy):", "def test_forward(self):\n validate_forward()", "def __init__(self, fritz_box, call_forwarding_dict):\n self.fritz_box = fritz_box\n self._name = \"callforwarding_\" + call_forwarding_dict['uid']\n self.uid = call_forwarding_dict['uid']\n self.from_number = call_forwarding_dict['from_number']\n self.to_number = call_forwarding_dict['to_number']\n self.connection_type = call_forwarding_dict['connection_type']\n self.enabled = call_forwarding_dict['enabled']", "def forward(self, output, target):\n raise NotImplementedError", "def set_follower(self, follower):\n self.follower = follower", "def forward(self, x):\n return x", "def step_forward(self):", "def set_direction(self, direction: str) -> None:\n if direction == \"forward\":\n self._bond.setDirection(self._deviceId, Directions.FORWARD)\n elif direction == \"reverse\":\n self._bond.setDirection(self._deviceId, Directions.REVERSE)\n self._attributes['current_direction'] = direction", "def _set_state_after_forward(self):\n for chunk_id, chunk in self.client.chunk_list.generate_chunk():\n if (\n chunk.get_state() == ChunkState.HOLD\n or chunk.get_state() == ChunkState.HOLD_AFTER_FWD\n ):\n chunk.set_unused()\n self.client.set_all_tensors_state_in_chunk(chunk_id, TensorState.HOLD)", "def forward_activation(self):\n return self._forward_activation", "def proxy_host(self, proxy_host):\n\n 
self._proxy_host = proxy_host", "def create_forward(self):\n return MessageCreateForwardRequestBuilder(self.append_to_request_url(\"createForward\"), self._client)", "def setDirection(self,stepDir = 2):\n pass", "def __init__(self, local_port, remote_port, command_runner, port_forward):\n super(FuchsiaSshForwarder, self).__init__()\n self._proc = None\n\n if port_forward:\n assert local_port, 'Local port must be given'\n else:\n assert remote_port, 'Remote port must be given'\n if not local_port:\n # Choose an available port on the host.\n local_port = util.GetUnreservedAvailableLocalPort()\n\n ssh_args = [\n '-N', # Don't execute command\n '-T', # Don't allocate terminal.\n # Ensure SSH is at least verbose enough to print the allocated port\n '-o', 'LogLevel=VERBOSE'\n ]\n ssh_args.extend(forwarder_utils.GetForwardingArgs(\n local_port, remote_port, self.host_ip,\n port_forward))\n\n with tempfile.NamedTemporaryFile() as stderr_file:\n self._proc = command_runner.RunCommandPiped(ssh_args=ssh_args,\n stderr=stderr_file)\n if not remote_port:\n remote_port = forwarder_utils.ReadRemotePort(stderr_file.name)\n\n self._StartedForwarding(local_port, remote_port)", "def forward_once(self, x):\n\t\t#x = F.normalize(self.network(x), p=2)\n\t\tx = self.network(x)\n\t\treturn x", "def forward(\n self\n ) -> None:\n if not self._forward_page_history_stack:\n # Do nothing if there is no forward page history.\n return\n\n self._back_page_history_stack.append(self._current_page)\n self._current_page = self._forward_page_history_stack.pop()", "def forward_train(self, *args, **kwargs):\n return self.detector.forward_train(*args, **kwargs)", "def fastforward(self, /, noerror=False):\n\t\tif not self in _running:\n\t\t\tif noerror: return\n\t\t\traise RuntimeError('Not running')\n\t\t_running.remove(self)\n\t\t_anim_stopped(self)\n\t\tfor attr in self._end:\n\t\t\tsep = attr.split('__')\n\t\t\tsubtarget, subattr = eval('.'.join(['self.target']+sep[:-1])), sep[-1]\n\t\t\tsetattr(subtarget, subattr, self._end[attr])", "def allow_forwarded_traffic(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_forwarded_traffic\")", "def allow_forwarded_traffic(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_forwarded_traffic\")", "def founder(self, founder: object):\n\n self._founder = founder", "def _fwd_set_ruri(target, fwd_result):\n # The Request-URI in the copy's start line MUST be replaced with the URI for this target. 
If the URI contains\n # any parameters not allowed in a Request-URI, they MUST be removed.\n sip_msg, opts = fwd_result\n sip_msg.ruri = target.clear_not_allowed_parts('ruri')\n return sip_msg, opts", "def forward(self):\n self.position += 1", "def set_redirect_port(self, nRedirectPort):\n\t\tcall_sdk_function('PrlPortFwd_SetRedirectPort', self.handle, nRedirectPort)", "def transfer(self):\n pass", "def forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n raise NotImplementedError", "def forward(self, s):", "def allow_forwarded_traffic(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_forwarded_traffic\")", "def _fwd_max_forwards(fwd_result):\n sip_msg, opts = fwd_result\n try:\n mf = sip_msg.find(MAXFORWARDS_HEADER)\n except Exception as e:\n raise e\n if isinstance(mf, NotFound):\n # If the copy does not contain a Max-Forwards header field, the proxy MUST add one with a field value,\n # which SHOULD be 70.\n sip_msg.set(MAXFORWARDS_HEADER, MaxForwardsHeader(70))\n else:\n mf.decrement()\n sip_msg.set(MAXFORWARDS_HEADER, mf)\n return sip_msg, opts", "def login(self, forwardUrl=None, setcookie=True):\n if not forwardUrl:\n forwardUrl = self.loginForwardUrl\n if not forwardUrl:\n forwardUrl = \"/\";\n if re.match(r'^\\w+:/', forwardUrl):\n self.redirect(forwardUrl, setcookie)\n else:\n self.localRedirect(forwardUrl, setcookie)", "def forward_pe(self, forward_pe: float):\n\n self._forward_pe = forward_pe", "def set_relay_address(self, relay_addr):\n self.relay_addr = self._Address(*relay_addr)", "def toggleIpforward(v):\n file_path = \"/proc/sys/net/ipv4/ip_forward\"\n with open(file_path, \"w\") as f:\n if v.ipForward:\n print(0, file=f)\n v.ipForward = False\n else:\n print(1, file=f)\n v.ipForward = True\n return", "def proxy_connected(self, src, dest, extra):\n self.route.proxy_connected(src, dest, extra=extra)", "def _historyForwardClickedSlot(self):\r\n\r\n steps, success = self._controller.forwardAction.data().toInt()\r\n if success:\r\n self._controller.model.relativeHistoryIndex = steps", "def forward_as_attachment_to(self):\n if \"forwardAsAttachmentTo\" in self._prop_dict:\n if isinstance(self._prop_dict[\"forwardAsAttachmentTo\"], OneDriveObjectBase):\n return self._prop_dict[\"forwardAsAttachmentTo\"]\n else :\n self._prop_dict[\"forwardAsAttachmentTo\"] = Recipient(self._prop_dict[\"forwardAsAttachmentTo\"])\n return self._prop_dict[\"forwardAsAttachmentTo\"]\n\n return None", "def feed_forward_size(self, feed_forward_size):\n if feed_forward_size < 1:\n raise ValueError('The `feed_forward_size` must be > 0.')\n self._internal.set_feed_forward_size(int(feed_forward_size))", "def set_next(self, next_layer):\n self.next_layer = next_layer", "def forward_backpropagation(self, a):\n a = self.forward(a)\n return a", "def goal(self, goal):\n\n self._goal = goal", "def set_hook(self, module):\n self.hook = module.register_forward_hook(self.hook_fn)" ]
[ "0.6887666", "0.6606515", "0.6383124", "0.6383124", "0.62122256", "0.60811234", "0.594021", "0.5932288", "0.5932288", "0.5932288", "0.581016", "0.5741199", "0.57171863", "0.56688905", "0.5637646", "0.5620836", "0.5603269", "0.5560376", "0.5535223", "0.5532661", "0.5506793", "0.55006456", "0.5477857", "0.5435532", "0.5351057", "0.5345362", "0.53316665", "0.5326462", "0.5325653", "0.5322424", "0.5311142", "0.53069985", "0.5295043", "0.52937114", "0.52911013", "0.5268392", "0.5266357", "0.5266357", "0.5254056", "0.52447253", "0.52330565", "0.51898414", "0.5177925", "0.51766676", "0.5120143", "0.50859183", "0.5085076", "0.50737447", "0.50672054", "0.5060708", "0.50436705", "0.4999894", "0.49954537", "0.49888268", "0.498436", "0.49767235", "0.49699336", "0.49676228", "0.49648327", "0.49617532", "0.49611714", "0.49540895", "0.49431068", "0.49299738", "0.491925", "0.49174765", "0.49054062", "0.48799706", "0.487267", "0.48678446", "0.4852692", "0.48448902", "0.48429623", "0.4842312", "0.48413217", "0.48406345", "0.48302025", "0.48302025", "0.48238018", "0.48205352", "0.48156047", "0.48153767", "0.4814189", "0.47935328", "0.47935328", "0.47829", "0.47782964", "0.47713512", "0.47698703", "0.474931", "0.474674", "0.47454566", "0.47241068", "0.47182152", "0.4692022", "0.46911314", "0.4688877", "0.46886718", "0.46865293", "0.4679497" ]
0.81721765
0
Sets the peer of this Dial.
def peer(self, peer):
    if peer is None:
        raise ValueError("Invalid value for `peer`, must not be `None`")  # noqa: E501

    self._peer = peer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setPeer (self, peer):\n\t\tself.peer = peer", "def setPeerToPeerNetwork(self, peerToPeerNetwork):\r\n raise NotImplementedError()", "def peer(self, value: Optional[MicrobitPeer]) -> None:\n if self.__peer is not None:\n self.__peer.remove_listener(self.__execute)\n\n if value is not None:\n value.add_listener(self.__execute)\n\n self.__peer = value\n self.__sync_x()\n self.__sync_y()\n self.__sync_z()\n self.__sync_current_gesture()", "def connect_to_peer(self):\n pass", "def set_peer_working(self, peer_id):\n self.peers[peer_id].set_working_state()", "def peer(self) -> str:\n return \"{0.host}:{0.port}\".format(self.transport.getPeer())", "def add_new_peer(self, peer_id, peer_host, port):\n if peer_id not in self.chain_instance.peer_connect_dict:\n self.chain_instance.peer_connect_dict[peer_id] = {'host': peer_host, 'port': port}", "def add_peer(self, peer_id, peer_ip):\n self.peers.update({peer_id: peer.Peer(peer_ip)})", "def set_port(self, party_port) -> None:\n\n self._port = party_port", "def peer_addresses(self, peer_addresses):\n\n self._peer_addresses = peer_addresses", "def peer(self) -> Optional[MicrobitPeer]:\n return self.__peer", "def peer_device(self) -> \"ASADevice\":\n if self._peer_device is None:\n self._peer_device = self.__class__(\n str(self.peer_ip_address), self.username, self.password, self.secret, self.port, **self.kwargs\n )\n else:\n self._peer_device.open()\n\n log.debug(\"Host %s: Peer device %s.\", self.host, self._peer_device)\n return self._peer_device", "def connect_to_peer(self, uri):\n self.init_socket(uri)", "def set_peer_waiting(self, peer_id, port):\n self.peers[peer_id].set_waiting_state(port)", "def _set_next_hop_learned_from_peer(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"next-hop-learned-from-peer\", rest_name=\"next-hop-learned-from-peer\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"next_hop_learned_from_peer must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"next-hop-learned-from-peer\", rest_name=\"next-hop-learned-from-peer\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__next_hop_learned_from_peer = t\n if hasattr(self, '_set'):\n self._set()", "def setConnectPortal(self,otherPortal):\n self._connectPortal = otherPortal", "def getPeer(self):\n return address.IPv4Address('TCP', *((self.host, self.port) + ('INET',)))", "def connect(self, peer):\n peer.listen()\n time.sleep(0.1)\n client_thread = ClientThread(peer.address, self.message_queue, self.queue_lock, self.on_message_received)\n client_thread.start()\n self.connected_as_client = True # TODO only if successful", "def peer_device(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"peer_device\"), kwargs)", "def connect_peer(self, peer, first=True):\n\n # Filter peers that we have send a connect request in previous loops\n # but we haven't seen a reply, yet.\n if peer.next_heart_beat_time is not None 
and first:\n logger.log(TRACE, \"%s is waiting for a reply until %r\",\n peer, peer.next_heart_beat_time)\n return\n\n # Compose the message.\n message = Message(\n source=self.app.uuid,\n to=peer.uuid,\n previous_hop=None,\n next_hop=peer.uuid,\n command=self.command_id,\n reply=False,\n handler=self,\n host=self.app.receiver.bind_address,\n port=self.app.receiver.bind_port,\n )\n\n if first:\n # Compute the timeout.\n peer.next_heart_beat_time = self.app.tick + UNRESPONSIVE_THRESHOLD\n peer.slow_heart_beat_down = 0\n logger.log(TRACE, \"First message composed for connect \"\n \"attempt to %s: %r; will wait until %r\",\n peer, message, peer.next_heart_beat_time)\n else:\n # Take into consideration the history of the peer.\n peer.schedule_heart_beat(self.app)\n logger.log(TRACE, \"Message composed for subsequent connect \"\n \"attempt to %s: %r; will wait until %r\",\n peer, message, peer.next_heart_beat_time)\n\n # We directly enqueue the message.\n self.app.sender.connection_queue.enqueue({peer: message})", "def set_follower(self, follower):\n self.follower = follower", "async def store_peers(self, peer: Peer):\n await self.peers.store(peer)", "def set_follow(self, follow):\n self.follow = follow", "def add_peer(self, peer, ws_extra_headers=None, ws_heartbeat=None):\n logger.info(\"Connecting to peer {}\".format(peer))\n return self.connection_manager.get_peer(\n peer,\n reconnect=not self.receptor.config._is_ephemeral,\n ws_extra_headers=ws_extra_headers,\n ws_heartbeat=ws_heartbeat,\n )", "def peer_address(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"peer_address\")", "def connect(self, telegram_bot, message_sender):\n\n self.__telegram_bot = telegram_bot\n self.__message_sender = message_sender", "def connecting_peer(self, peer):\n if peer.next_heart_beat_time < self.app.tick:\n self.declare_no_connection(peer)\n else:\n logger.log(TRACE, \"%s is connecting (has until %r, now is %r)\",\n peer, peer.next_heart_beat_time, self.app.tick)", "def add_peer(self, writer):\r\n address = self.get_address_string(writer)\r\n self.connection_pool[address] = writer\r\n logger.info(\"Added new peer to pool\", address=address)", "def connection_made(self, transport):\n self.transport = transport\n peername = transport.get_extra_info('peername')\n self.ip = peername[0]\n self.client = \"{:s}:{:d}\".format(*peername)\n logger.debug('Connection from {}'.format(peername))\n clients.append(self)\n self.env = envs[self.ip]", "def getPeer(self):\n return \"Peer:PID:\" + str(self.transport.pid)", "def message_sent(self, message):\n with self.app.peers_lock:\n peer = self.app.peers[message.to]\n peer.state_connecting = True", "def setup(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request #TCP socket object for the client\n self.server.clients[(self.ip, self.port)] = self\n self.server.peers.append((self.connection)) \n for client in self.server.clients:\n print(\"Connected client: \", client)\n\n #for peer in self.server.peers:\n # print(\"Peers: \", peer)", "def set_owner(self, owner, is_stream=False):\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner", "async def dial(self, maddr, self_id, options=None):\n host = maddr.value_for_protocol('ip4')\n port = int(maddr.value_for_protocol('tcp'))\n\n reader, writer = await asyncio.open_connection(host, port)\n\n # First: send our peer ID so receiver knows it\n writer.write(id_b58_encode(self_id).encode())\n await 
writer.drain()\n\n # Await ack for peer id\n expected_ack_str = \"received peer id\"\n ack = (await reader.read(len(expected_ack_str))).decode()\n\n if ack != expected_ack_str:\n raise Exception(\"Receiver did not receive peer id\")\n\n return RawConnection(host, port, reader, writer, True)", "def set_address(self, host, port):\n self.host = host\n self.port = port", "def test_peers_peerid_post(self):\n pass", "def transport(self, transport):\n\n self._transport = transport", "def set(self, **kwargs: Dict[str, Any]): # bad naming\n warnings.warn(\"set() is deprecated; use connect().\", DeprecationWarning)\n self.connect(**kwargs)", "def recipient(self, recipient):\n\n self._recipient = recipient", "def counterparty(self, counterparty):\n\n self._counterparty = counterparty", "def addPeer(self, peerType, peerId):\r\n raise NotImplementedError()", "def setConnectionsBetweenSuperPeers(self, numberOfConnections):\r\n raise NotImplementedError()", "def peer(node_index):\n node = Node.from_index(node_index)\n pub_key = get(node, 'getinfo')['identity_pubkey']\n address = f'{pub_key}@localhost:{node.port}'\n click.echo(click.style(address, fg='green'))", "def AddPeer(self, peer_id):\n host, port = self._LookupPeer(peer_id)\n logging.debug('Adding peer %r %s:%d.' % (peer_id, host, port))\n peer = xmlrpclib.ServerProxy('http://%s:%s' % (host, port))\n self.peers[peer_id] = host, port, peer", "def connect(self, target):\n self.__sink = target", "def set_receiver(self, receiver):\n self.receiver = receiver", "def update(self, prefix, peer, value):\n peer_sym = self.peers.get(peer, None)\n if peer_sym is None:\n peer_sym = self.peers[peer] = peer\n node = self.radix.add(prefix)\n node.data[peer_sym] = value\n return node", "def __set_endpoint(self, endpoint):\n self._endpoint = endpoint\n\n host, port = endpoint.get_address()\n if __debug__: dprint(\"update LAN address \", self._lan_address[0], \":\", self._lan_address[1], \" -> \", self._lan_address[0], \":\", port, force=True)\n self._lan_address = (self._lan_address[0], port)\n\n # at this point we do not yet have a WAN address, set it to the LAN address to ensure we\n # have something\n assert self._wan_address == (\"0.0.0.0\", 0)\n if __debug__: dprint(\"update WAN address \", self._wan_address[0], \":\", self._wan_address[1], \" -> \", self._lan_address[0], \":\", self._lan_address[1], force=True, level='error')\n self._wan_address = self._lan_address\n\n if not self.is_valid_address(self._lan_address):\n if __debug__: dprint(\"update LAN address \", self._lan_address[0], \":\", self._lan_address[1], \" -> \", host, \":\", self._lan_address[1], force=True)\n self._lan_address = (host, self._lan_address[1])\n\n if not self.is_valid_address(self._lan_address):\n if __debug__: dprint(\"update LAN address \", self._lan_address[0], \":\", self._lan_address[1], \" -> \", self._wan_address[0], \":\", self._lan_address[1], force=True)\n self._lan_address = (self._wan_address[0], self._lan_address[1])\n\n # our address may not be a bootstrap address\n if self._lan_address in self._bootstrap_candidates:\n del self._bootstrap_candidates[self._lan_address]\n\n # our address may not be a candidate\n if self._lan_address in self._candidates:\n del self._candidates[self._lan_address]", "def honeypot_peer(self,honeypotids,ip,port):\n req = {\"type\":\"set_peer\",\n \"from\":self.network.mc_id,\n \"to\":honeypotids,\n \"ip\":ip,\n \"port\":port}\n expect_dict = {\"type\":\"peer_set\"}\n msg_list = self.send_receive(req,honeypotids,expect_dict)\n answer = {}\n for 
msg in msg_list:\n answer[msg[\"from\"]] = [msg[\"ip\"],msg[\"port\"]]\n return answer", "def host(self, host):\n\n self._host = host", "def host(self, host):\n\n self._host = host", "def host(self, host):\n\n self._host = host", "def host(self, host):\n\n self._host = host", "async def resolve_peer(\n self,\n peer_id: Union[int, str]\n ) -> Union[raw.base.InputPeer, raw.base.InputUser, raw.base.InputChannel]:\n if not self.is_connected:\n raise ConnectionError(\"Client has not been started yet\")\n\n try:\n return await self.storage.get_peer_by_id(peer_id)\n except KeyError:\n if isinstance(peer_id, str):\n if peer_id in (\"self\", \"me\"):\n return raw.types.InputPeerSelf()\n\n peer_id = re.sub(r\"[@+\\s]\", \"\", peer_id.lower())\n\n try:\n int(peer_id)\n except ValueError:\n try:\n return await self.storage.get_peer_by_username(peer_id)\n except KeyError:\n await self.send(\n raw.functions.contacts.ResolveUsername(\n username=peer_id\n )\n )\n\n return await self.storage.get_peer_by_username(peer_id)\n else:\n try:\n return await self.storage.get_peer_by_phone_number(peer_id)\n except KeyError:\n raise PeerIdInvalid\n\n peer_type = utils.get_peer_type(peer_id)\n\n if peer_type == \"user\":\n await self.fetch_peers(\n await self.send(\n raw.functions.users.GetUsers(\n id=[\n raw.types.InputUser(\n user_id=peer_id,\n access_hash=0\n )\n ]\n )\n )\n )\n elif peer_type == \"chat\":\n await self.send(\n raw.functions.messages.GetChats(\n id=[-peer_id]\n )\n )\n else:\n await self.send(\n raw.functions.channels.GetChannels(\n id=[\n raw.types.InputChannel(\n channel_id=utils.get_channel_id(peer_id),\n access_hash=0\n )\n ]\n )\n )\n\n try:\n return await self.storage.get_peer_by_id(peer_id)\n except KeyError:\n raise PeerIdInvalid", "def __init__(self, peer_url):\n self.node_url = peer_url\n parsed_url = parse(peer_url)\n if parsed_url.netloc:\n self.url = parsed_url.netloc\n elif parsed_url.path:\n self.url = parsed_url.path\n else:\n raise ValueError('Invalid URL')", "def provider_customer_peering(self, provider, customer):\n self._connect_ases(provider, customer)\n set_community(self, provider, customer.asn, str(provider.asn) + ':1336')\n set_community(self, customer, provider.asn, str(customer.asn) + ':1338')", "def peer_port(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"peer_port\")", "def setParent(self, edge):\n self.parent_edge = edge", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def set_player(self, player):\n\n self._player = player", "def handshake(self, peer_name):\n version = Version()\n self.send_message(peer_name, version)", "async def connect(self, peer_ip, peer_port):\n peer_name = f\"{peer_ip}:{peer_port}\"\n try:\n reader, writer = await open_connection(peer_ip, peer_port)\n self.peers[peer_name] = {\n \"reader\": reader,\n \"writer\": writer,\n \"buffer\": ProtocolBuffer()\n }\n client_coro = create_task(self.connection_handler(peer_name))\n await client_coro\n except CancelledError:\n print(f\"Warning: Task handling connection to {peer_name} canceled.\")\n except NodeDisconnectException:\n print(f\"Warning: Peer {peer_name} disconnected\")\n await self.close_connection(peer_name)\n except ConnectionError:\n print(f\"Error: connection error for peer {peer_name}\")", "def declare_no_connection(self, peer):\n 
peer.state_no_connection = True\n peer.last_heart_beat_time = self.app.tick\n peer.next_heart_beat_time = \\\n peer.last_heart_beat_time + UNRESPONSIVE_RECONNECT_WAIT\n logger.debug(\"Cannot connect to %s; will attempt again after %r\",\n peer, peer.next_heart_beat_time)", "def connectToSeeds(sock, peerQ):\n for addr, port in iter(peerQ.get, \"STOP\"):\n with lockSocketReq:\n log.debug(f\"Connecting to seed {addr}:{port}\",\"Connect to Seeds\")\n sock.connect(f\"tcp://{addr}:{port}\")\n counterSocketReq.release()\n log.info(f\"Dispatcher connected to seed with address:{addr}:{port})\", \"Connect to Seeds\")", "def remove_peer(self, peer_id):\n del self.peers[peer_id]", "def connectionMade(self):\n peer = self.transport.getPeer()\n log.info(\"Deluge Client connection made from: %s:%s\", peer.host, peer.port)\n # Set the initial auth level of this session to AUTH_LEVEL_NONE\n self.factory.authorized_sessions[self.transport.sessionno] = {}", "def set_ip(self, party_ip) -> None:\n\n self._ip = party_ip", "def discovery(self, discovery):\n self._discovery = discovery", "def __init__(self, caller=None, dialstatus=None, dialstring=None, forward=None, forwarded=None, peer=None): # noqa: E501 # noqa: E501\n\n self._caller = None\n self._dialstatus = None\n self._dialstring = None\n self._forward = None\n self._forwarded = None\n self._peer = None\n self.discriminator = None\n\n if caller is not None:\n self.caller = caller\n self.dialstatus = dialstatus\n if dialstring is not None:\n self.dialstring = dialstring\n if forward is not None:\n self.forward = forward\n if forwarded is not None:\n self.forwarded = forwarded\n self.peer = peer", "def _peer_url(self, path):\r\n return \"http://127.0.0.1:{port}/peer_grading/{path}/\".format(\r\n port=self.server.port, path=path\r\n )", "def test_peers_peerid_get(self):\n pass", "def relay_port(self, pid, remote, local=0):\n\t\tif not self.did_handshake:\n\t\t\traise UsageError(\"Not connected!\")\n\t\tif self.cid is None:\n\t\t\traise UsageError(\"Not in a Group!\")\n\t\ts = socket.socket()\n\t\ttry:\n\t\t\ts.bind((\"localhost\", local))\n\t\t\tport = s.getsockname()[1]\n\t\t\ts.listen(1)\n\t\t\tself.AL.acquire()\n\t\t\tself.ls[s] = {\n\t\t\t\t\"peer\": pid,\n\t\t\t\t\"port\": remote,\n\t\t\t\t\"got\": 0,\n\t\t\t\t\"local\": port,\n\t\t\t\t}\n\t\t\tself.AL.release()\n\t\t\treturn port\n\t\texcept:\n\t\t\tif self.AL.locked():\n\t\t\t\tself.AL.release()\n\t\t\ts.close()\n\t\t\traise", "def register_peer_switch(self, peer_id, in_port):\n if peer_id:\n if peer_id not in self.peersw_tbl:\n self.peersw_tbl[peer_id] = PeerSwitch(peer_id)\n self.peersw_tbl[peer_id].port_no = in_port", "def set_owner(self, owner: Optional[\"STACObject_Type\"]) -> \"Link\":\n self.owner = owner\n return self", "def connection_from_client(self, transport):\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] incomming connection from %s port %s',\n self._sock_id, peer_data[0], peer_data[1])\n\n self._server.tcp_connection_established(self)", "def connection_made(self, transport: BaseTransport) -> None:\n self._transport = cast(DatagramTransport, transport)", "def AddConnectedPeer(self, peer):\n # if present\n self.RemoveFromQueue(peer.address)\n self.AddKnownAddress(peer.address)\n\n if len(self.Peers) > settings.CONNECTED_PEER_MAX:\n peer.Disconnect(\"Max connected peers reached\", isDead=False)\n\n if peer not in self.Peers:\n self.Peers.append(peer)\n else:\n # either peer is already in the list and it has reconnected before it timed out on our side\n # or 
it's trying to connect multiple times\n # or we hit the max connected peer count\n self.RemoveKnownAddress(peer.address)\n peer.Disconnect()", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def handle_set(self, peer, sender, bus, topic, headers, message):\n if sender == 'pubsub.compat':\n message = compat.unpack_legacy_message(headers, message)\n\n point = topic.replace(topics.ACTUATOR_SET() + '/', '', 1)\n requester = sender\n headers = self._get_headers(requester)\n if not message:\n error = {'type': 'ValueError', 'value': 'missing argument'}\n _log.debug('ValueError: ' + str(error))\n self._push_result_topic_pair(ERROR_RESPONSE_PREFIX,\n point, headers, error)\n return\n\n try:\n self._set_point(requester, point, message)\n except RemoteError as ex:\n self._handle_remote_error(ex, point, headers)\n except StandardError as ex:\n self._handle_standard_error(ex, point, headers)", "def Connect(self):\n self.channel = grpc.insecure_channel(self.address)\n self._setup()", "async def set_player(self, player: Optional[andesite.Player]) -> None:\n ...", "def connection_made(self, transport):\n self.transport = transport\n sock = transport.get_extra_info(\"socket\") \n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) \n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)", "def set_prev(self, p) -> None:\n self.prev = p", "def set_point(self, requester_id, topic, value, **kwargs):\n\n rpc_peer = bytes(self.vip.rpc.context.vip_message.peer)\n return self._set_point(rpc_peer, topic, value, **kwargs)", "def set_pkt(self, pkt):\n self.pkt = pkt", "def SetConnectionPen(self, pen):\r\n\r\n self._dottedPen = pen\r\n self._dirty = True", "def RemoveConnectedPeer(self, peer):\n if peer in self.Peers:\n self.Peers.remove(peer)", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def remove_peer(self, writer):\r\n address = self.get_address_string(writer)\r\n self.connection_pool[address] = writer\r\n logger.info(\"Removed peer to pool\", address=address)", "def set_relay_address(self, relay_addr):\n self.relay_addr = self._Address(*relay_addr)" ]
[ "0.83918417", "0.67424357", "0.6415004", "0.60654163", "0.5945206", "0.5834176", "0.5686866", "0.5547047", "0.54729116", "0.54373324", "0.54167396", "0.53934383", "0.5380653", "0.5360441", "0.5354226", "0.53020024", "0.5296971", "0.5267418", "0.52219296", "0.52138495", "0.5179136", "0.5177158", "0.5096166", "0.50865424", "0.5085722", "0.5077557", "0.50725174", "0.5060611", "0.5054306", "0.50514317", "0.50340754", "0.50315964", "0.5028223", "0.5007202", "0.49856818", "0.4976492", "0.4973516", "0.49508163", "0.494547", "0.49363405", "0.49151668", "0.49141347", "0.49070492", "0.48792067", "0.48732597", "0.4869949", "0.48625696", "0.48400387", "0.48366427", "0.48239934", "0.48239934", "0.48239934", "0.48239934", "0.4822476", "0.48159397", "0.48040804", "0.47751206", "0.47559175", "0.47547302", "0.47547302", "0.47547302", "0.47547302", "0.47547302", "0.47455412", "0.4739802", "0.47356296", "0.4709381", "0.47075027", "0.46872413", "0.4680005", "0.4678373", "0.4678205", "0.46696183", "0.4657531", "0.46567634", "0.4652471", "0.46352407", "0.46313703", "0.46308628", "0.46258932", "0.462159", "0.46187323", "0.46187323", "0.46187323", "0.46187323", "0.46153516", "0.46133575", "0.4610679", "0.4607033", "0.46028537", "0.46024406", "0.4600259", "0.45839247", "0.4577722", "0.45764118", "0.45764118", "0.45764118", "0.45764118", "0.45730948", "0.4571556" ]
0.7684957
1
Returns the model properties as a dict
def to_dict(self):
    result = {}

    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    if issubclass(Dial, dict):
        for key, value in self.items():
            result[key] = value

    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n for prop in self.__properties__\n if prop in vars(self)\n }\n rv.update(self._props)\n return rv", "def get_properties(self):\n return self.properties", "def get_properties():", "def getProperties():", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def getProperties(self):\n return self.properties", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def json_properties(self):\n attributes = []\n all = vars(self)\n for var in all:\n if var[:1] != '_':\n attributes.append(var)\n if isinstance(self, db.Model):\n properties = self.properties().keys()\n for property in properties:\n if property[:1] != '_':\n attributes.append(property)\n return attributes", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def _properties(self) -> dict[str, dict[str, str]]:\n schema = self.schema(by_alias=False)\n if schema.get('properties') is not None:\n return schema.get('properties', {})\n return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})", "def get_model_properties(self):\n properties = {}\n\n filename = self._get_data_filename(\"modelargs.json\")\n with open(filename, \"r\") as f:\n results = json.loads(f.read())\n properties[\"image_size\"] = results.get(\"image_size\")\n properties[\"num_classes\"] = results.get(\"num_classes\")\n properties[\"model\"] = results.get(\"model\")\n properties[\"name\"] = results.get(\"name\")\n properties[\"filter_size\"] = results.get(\"filter_size\", 3)\n properties[\"increase_factor\"] = results.get(\"increase_factor\", 0)\n self.model = properties[\"name\"] # regardless of the name of the folder, this will get the proper model name (i.e. 
<modelname>.cntk)\n\n # optional property\n properties[\"trainer\"] = results.get(\"trainer\", \"CNTK 2.2\")\n\n self._ensure_model_file()\n properties[\"size_mb\"] = round(os.path.getsize(self.model_file) / (1000 * 1000))\n\n return properties", "def as_dict(self):\n result = {}\n for attr in self.__attr:\n result[attr] = getattr(self, attr)\n return result", "def to_dict_model(self) -> dict:\n return dict((key, getattr(self, key)) for key in self.__mapper__.c.keys())", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def get_modelDict(self):\n return self.__modelDict", "def attributes(self):\n return dict(self.__attributes)", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def to_dict(self):\n result = {}\n for p in self.json_properties():\n value = getattr(self, p)\n if isinstance(value, datetime.datetime):\n value = value.strftime('%s%f')[:-3]\n result[Jsonifiable.transform_to_camelcase(p)] = value\n return result", "def properties(self):\n return self._props", "def properties(self):\n pass", "def to_dict(self):\n d = {}\n for attr in self.__class__.attributes:\n d[attr] = getattr(self, attr)\n return d", "def properties_get(self):\n return self._get('properties')", "def _collect_properties(self):\n properties = {\n 'userid': self.user_id,\n 'title': self.get_fullname()\n }\n if not self.ogds_user:\n return properties\n\n for attribute_name in self.ogds_user_attributes:\n value = getattr(self.ogds_user, attribute_name)\n properties[attribute_name] = value\n return properties", "def getPropertyDict(self):\n \n d = self.getChild('__properties')\n if d:\n return d.getDict()\n else:\n return {}", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def to_dict(self, include=None):\n _MODEL = type(self)\n repr_dict = {}\n if include is None:\n include = []\n for name, prop in _MODEL._properties.iteritems():\n if hasattr(prop, 'public') and getattr(prop, 'public', False):\n include.append(name)\n\n for name in include:\n # check if this property is even allowed to be public\n # or has a value set\n if not hasattr(self, name):\n continue\n\n value = getattr(self, name)\n if type(getattr(_MODEL, name)) == ndb.StructuredProperty:\n if isinstance(value, list):\n items = []\n for item in value:\n items.append(item.to_dict(include=None))\n repr_dict[name] = items\n else:\n repr_dict[name] = value.to_dict(include=None)\n elif isinstance(value, date):\n repr_dict[name] = value.isoformat()\n elif isinstance(value, ndb.Key):\n repr_dict[name] = value.urlsafe()\n else:\n repr_dict[name] = value\n\n if self._key:\n repr_dict['key'] = self.get_key_urlsafe()\n return repr_dict", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k == 'POSSIBLE_METHODS':\n continue\n if k == 'keysamplers':\n properties[k] = [i.to_dict() for i in self.__dict__[k] if hasattr(i,'to_dict')]\n elif k in {'pooler'}:\n properties[k] = self.__dict__[k].to_dict()\n 
else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def get_properties(self):\n return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates", "def properties(self):\n\n return self._properties", "def ToDict(self):\n atributes_dictionary = {}\n for key, value in self.__dict__.iteritems():\n atributes_dictionary[key] = value\n return atributes_dictionary", "def properties(self):", "def properties(self):", "def properties(self):", "def modelPropertiesDictionary(sql_row_list):\n \n properties_dictionary = \\\n {\n \"id\": sql_row_list[0],\n \"name\": sql_row_list[1],\n \"last_deploy_timestamp\": sql_row_list[2],\n \"active_version\": sql_row_list[3],\n \"build_id\": sql_row_list[4]\n };\n\n return properties_dictionary;", "def as_dict(self):\n data = dict()\n for name in self.fields:\n val = getattr(self, name)\n if isinstance(val, Model):\n val = val.as_dict()\n elif isinstance(val, list) and val and isinstance(val[0], Model):\n val = [sub.as_dict() for sub in val]\n data[name] = val\n return data", "def to_dict(self):\n if self._dict is not None:\n return self._dict\n\n result = {}\n for key in self.ATTRIBUTES:\n value = getattr(self, key)\n if value:\n result[key] = value\n self._dict = result\n return result", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def to_dict(self):\n _dict = {}\n for f in self._meta.fields:\n if f.name == 'created':\n _dict[f.name] = str(f.value_from_object(self))\n else:\n _dict[f.name] = f.value_from_object(self)\n\n return _dict", "def to_dict(self):\r\n return self.__dict__", "def properties(self):\n return None", "def properties(self):\n return None", "def to_dict(self):\n return attr.asdict(self)", "def as_dict(self):\n return self.__dict__", "def _get_model_state(self) -> dict:\n return dict(model=self.model, kwargs=self._model_kwargs)", "def dictify(self):\n return {\n \"name\" : self.name,\n \"lastname\" : self.lastname,\n \"phone\" : self.phone,\n \"email\" : self.email\n }", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n # \"created_by\": self.created_by,\n # \"created_on\": self.created_on,\n # \"modified_by\": self.modified_by,\n # \"modified_on\": self.modified_on\n }", "def properties(self):\r\n return resources.Properties(self)", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def properties(self, pk):\n return JsonResponse(self._get_properties(pk))", "def to_dict(self):\n return vars(self)", "def to_dict(self):\n\n # Check if is the right instance.\n if isinstance(self, db.Model):\n # construct a dictionary from column names and values.\n dict_representation = {c.name: getattr(self, c.name) for c in self.__table__.columns}\n return dict_representation\n else:\n raise AttributeError(type(self).__name__ + \" is not instance of \" + db.Model.__name__)", "def bson_properties(self):\n return []", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name\n }", "def get_dict(self):\n return", "def to_dict(self):\n return to_dict(self.__dict__)", "def to_json(self):\n properties = self.to_dict()\n if isinstance(self, db.Model):\n properties['id'] = unicode(self.key().id())\n return json.dumps(properties)", "def to_dict(self):", "def to_dict(self):\n return 
self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def get_attributes(self) -> Dict[str, str]:\n pass", "def config(self) -> ModelConfigDict:\n return self.config_obj.to_dict()", "def properties(self):\n return self.properties_with_uid[1:]", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def serialise(self):\n return {\n 'id': self.id,\n 'category_id': self.category_id,\n 'name': self.name,\n 'description': self.description,\n 'quantity': self.quantity,\n 'price': self.price,\n 'user_id': self.user_id\n }", "def getPropertiesAll():", "def get_all_properties(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )", "def model_info(self):\n if not self._model_info:\n self._load_model_info()\n try:\n data = json.loads(self._model_info)\n except (TypeError, ValueError):\n data = {}\n return data", "def to_dict(self):\n return {\n 'name': self.get_name(),\n 'description': self.get_description()\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def as_dict(self):\n return self.__dict__", "def to_dict(self):\r\n\r\n return {\r\n 'product_id': self.product_id,\r\n 'product_name': self.product_name\r\n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n }", "def asdict(self):\n return attr.asdict(self)", "def to_dict(self) -> dict:", "def getDict(self):\n res = {}\n for attr, value in self.__dict__.iteritems():\n if type(attr) is IntType or type(attr) is StringType or type(attr) is LongType or type(attr) is UnicodeType:\n res[attr] = value\n elif isinstance(attr, datetime.datetime):\n res[attr] = value.isoformat('-')\n \n return res", "def attributes(self):\n return self.__dict.keys()", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def as_dict(self):\n return {c.key: getattr(self, c.key)\n for c in inspect(self).mapper.column_attrs}" ]
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.690053", "0.6881568", "0.6881568", "0.6857664", "0.68415916", "0.68122137", "0.680096", "0.67914945", "0.6757063", "0.6753585", "0.6741746", "0.6741746", "0.6741746", "0.6735291", "0.67126125", "0.6697801", "0.6695801", "0.6689893", "0.6680752", "0.66802895", "0.66802895", "0.66802895", "0.66547817", "0.66495687", "0.6633999", "0.6619567", "0.6619567", "0.66156983", "0.66049474", "0.6590706", "0.6590706", "0.6590206", "0.6587873", "0.65861845", "0.65822417", "0.65794736", "0.65792733", "0.657747", "0.6571183", "0.65662557", "0.65637356", "0.6539919", "0.65396816", "0.65283066", "0.65252614", "0.6513477", "0.65098846", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.6507418", "0.6505772", "0.65015876", "0.64951885", "0.64951885", "0.64951885", "0.64857763", "0.6474329", "0.6469453", "0.64684683", "0.6453606", "0.6453024", "0.6453024", "0.6430734", "0.6429058", "0.6426903", "0.64215595", "0.64201874", "0.6417152", "0.6414739", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.64035517" ]
0.0
-1
Returns the string representation of the model
def to_str(self):
    return pprint.pformat(self.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n model_fields = get_model_fields(\n opts.model,\n foreign=False,\n m2m=False,\n exclude=self.exclude_from_str\n )\n # TODO: replace the above with the below to remove the get_model_fields call:\n # model_fields = [\n # f for f in opts.get_fields()\n # if f.concrete\n # and not (f.primary_key or f.is_relation or f.name in self.exclude_from_str)\n # ]\n result = \" \".join(\n [\n str(fld.value_from_object(self))\n for fld in model_fields\n if fld.value_from_object(self)\n ]\n )\n return result.strip() or super().__str__()", "def __str__(self):\n return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __repr__(self):\n\n mod = f\"{self.__class__.__name__} Model\"\n try:\n mod += f': {self.filename}'\n except AttributeError:\n pass\n s = [mod]\n for name, v in self.metadata.items():\n s += [f\"{name:16} : {v}\"]\n return '\\n'.join(s)", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def __str__(self):\n return \"DataModel(name={},attributes={},description={})\".format(\n self.name, {a.name: str(a) for a in self.attributes}, self.description\n )", "def model_info(self) -> str:\n return self._model_info(self.model).decode(\"utf-8\")", "def __str__(self):\n return str(self.serialize())", "def __str__ (self) :\n\n return self.as_string()", "def __str__(self):\n\n return self.toString()", "def __str__(self):\n msg = [\n f'{self.model=}',\n f'{self.field=}',\n f'{self.fxx=}',\n f'{self.date=}',\n f'{self.priority=}',\n ]\n return '\\n'.join(msg)", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.extended_object.get_title()\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return grid_search_to_str(self.model)", "def __str__(self):\n return self.toString()", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n return str(self.__dict__)", "def to_representation(self) -> str:\n raise NotImplementedError()", "def __str__(self):\n return str(self.obj)", "def __str__(self):\n return self.make_flat()", "def dump_model(self):", "def __str__(self):\n return str(self.__dict__['_obj'])", "def __str__(self) -> str:\n model_str = [\"\\nModel info:\\n\", \" Unimodal encoder:\\n\"]\n\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_encoder[modality]}\")\n\n model_str.append(\"\\n\\n Unimodal decoder:\\n\")\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_decoder[modality]}\")\n\n if self.multimodal_decoder is not None:\n model_str.append(\"\\n\\n Multimodal decoder:\\n\")\n model_str.append(f\" {self.multimodal_decoder}\")\n\n return \"\".join(model_str)", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence 
lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' most common words: ' + str(self.common_word) + '\\n'\n\n return s", "def to_string(self):\r\n return self.__str__()", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __str__(self):\n return str(self.get_data())", "def __str__(self):\n\n return self.raw_field", "def __str__(self):\n return f\"model {self._name}\"", "def serialize(self):\n\n\t\treturn str(self)", "def __repr__(self):\n \n s = 'text model name: ' + self.name + '\\n' \n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' number of word stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of commas counts: ' + str(len(self.commas_per_sentence)) + '\\n'\n return s", "def __str__(self):\n return self.get_str()", "def serialize(self):\n\n return str(self)", "def __str__(self) -> str:\n if self.name_field:\n return str(getattr(self, self.name_field))\n # noinspection PyUnresolvedReferences\n data = [\n # Collect the string representations of related objects.\n # getattr(self, fk_field.attname) and\n # fk_field.value_from_object(self) would only return the primary\n # key of the related object.\n str(getattr(self, fk_field.name))\n for fk_field in get_model_fields(\n self._meta.model, base=False, foreign=True, m2m=False\n )\n if not fk_field.null\n ]\n if len(data) < 2:\n # Cannot build a more meaningful representation than the default.\n return super().__str__()\n else:\n template = \"{}\" + \" ({})\" * (len(data) - 1)\n return template.format(*data)", "def __str__(self):\n return self.s", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __repr__(self):\n\n # info string\n info = self.model.__repr__()\n info += \"\\n=========================\\n\"\n info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n info += f\"Training Environment:\\t{ self.device.type }\\n\"\n info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n info += \"=========================\\n\"\n\n return info", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths))\\\n + '\\n'\n s += ' number of punctuation types: ' + str(len(self.punctuation))\n return s", "def dumps(self, indent=0):\n outstr = \" \"*indent + \"MewloDbModel object '{0}' attribute values:\\n\".format(self.__class__.__name__)\n public_props = (name for name in dir(object) if not name.startswith('_'))\n for name in public_props:\n outstr += \" \"*indent + \"{0}: {1}\\n\".format(name, str(getattr(self,name)))\n return outstr", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.title or str(_(\"Empty 
title\"))\n\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()" ]
[ "0.85849506", "0.78144926", "0.7789918", "0.77506983", "0.77506983", "0.7712724", "0.7699126", "0.76694965", "0.7650795", "0.7600532", "0.7582587", "0.75700307", "0.75406003", "0.7523009", "0.7515774", "0.75006944", "0.74870557", "0.74870557", "0.7468478", "0.74514323", "0.74448735", "0.7441703", "0.7436406", "0.7413709", "0.74062586", "0.73829687", "0.7360902", "0.7360902", "0.73304814", "0.7326588", "0.73241246", "0.73110706", "0.7310744", "0.7302378", "0.72998", "0.72935885", "0.72920156", "0.72892475", "0.72892475", "0.72892475", "0.72892475", "0.72892475", "0.7280832", "0.726243", "0.72510827", "0.7244596", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446", "0.72252446" ]
0.0
-1
For `print` and `pprint`
def __repr__(self):
    return self.to_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def print_out():\n pass", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def _printable(self):\n pass", "def _print_custom(self):\n pass", "def pypprint(*args, **kwargs): # type: ignore\n from typing import Iterable\n\n if len(args) != 1:\n print(*args, **kwargs)\n return\n x = args[0]\n if isinstance(x, dict):\n for k, v in x.items():\n print(f\"{k}:\", v, **kwargs)\n elif isinstance(x, Iterable) and not isinstance(x, str):\n for i in x:\n print(i, **kwargs)\n else:\n print(x, **kwargs)", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def print(*args, **kwargs):\n with P_LOCK:\n __builtins__.print(*args, **kwargs)", "def print(self):\n # Your implementation here", "def p(value):\n pp.pprint(value)", "def static_print(*args, __p=print, **kwargs):\n __p(*args, **kwargs)", "def print(self, *args, **kwargs):\n print(*args, **kwargs)", "def pprint(self):\n print(self.pprint_str())", "def pprint(obj):\n for argname in sorted([x for x in dir(obj) if not x.startswith('__')]):\n # Skip callables\n if hasattr(getattr(obj, argname), '__call__'):\n continue\n print(\"{} : {}\".format(argname, getattr(obj, argname)))", "def print_(self, s: str) -> None:", "def my_pprint(obj, intend = 0):\n if isinstance(obj, dict):\n for key, value in obj.items():\n print(intend*\" \"+str(key)+\" : \")\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, list):\n for value in obj:\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, bytes):\n print(\"<binary data>\")\n \n else:\n try:\n print(intend*\" \"+str(obj))\n except UnicodeDecodeError:\n print(intend*\" \"\"<?>\")", "def test_print(chikin):\n chikin.print()", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def out(*args):\r\n print(*args)", "def __pprint(object, stream=None, indent=1, width=80, depth=None):\n printer = PrettyPrinterExt(\n stream=stream, indent=indent, width=width, depth=depth)\n printer.pprint(object)", "def DumpPprint(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import pprint\n \n text = pprint.pformat(data)\n \n return text", "def repl_print_statements():\n pass", "def test_03_pass_print(self):\n print('Hello World!')", "def p(self):\n self.printstdout = True", "def print(*args, **kwargs):\n new_args = []\n for arg in args:\n if builtins.isinstance(arg, models.Point):\n new_args.append(\"({0}, {1})\".format(arg.x, arg.y))\n else:\n new_args.append(arg)\n\n builtins.print(*new_args, **kwargs)", "def real_print(*args, **kwargs):\n\n kwargs.setdefault('file', real_stdout)\n _python_print_function(*args, **kwargs)", "def to_print_out(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('print')\n else:\n self.output('print')", "def debug_print(debug_data):\n if DEBUG_MODE == \"true\":\n pp.pprint(debug_data)", "def print(self):\r\n self.print_avec_separateur()", "def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i", "def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])", "def print(self):\n print(self.pretty_str())", "def 
test_print4(self):\n writer = StringIO()\n collatz_print(writer, 1, 1, 1)\n self.assertEqual(writer.getvalue(), \"1 1 1\\n\")", "def eprint(*args, **kwargs):\n\tprint(*args, file=sys.stderr, **kwargs)", "def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)", "def use_pypprint_for_implicit_print(self) -> None:\n if self.implicit_print is not None:\n self.implicit_print.func.id = \"pypprint\" # type: ignore\n # Make sure we import it later\n self.undefined.add(\"pypprint\")", "def test_print(self):\n writer = StringIO()\n collatz_print(writer, 1, 10, 20)\n self.assertEqual(writer.getvalue(), \"1 10 20\\n\")", "def pprint(self):\n return pformat(repr(self))", "def printer(message):\n if VERBOSITY:\n pprint(message)", "def rec_print(p):\n if len(p) == 0:\n return\n t = p.pop(0)\n print t\n rec_print(p)", "def pformat(object):\r\n return PrettyPrinter().pformat(object)", "def printc(*a, **kw):\n print(*a, **kw)", "def pr(x):\n Card.print_pretty_cards(x)", "def debug_print(self, *content):\n if self.debug:\n print(*content)", "def pprint(object, stream=None):\r\n printer = PrettyPrinter(stream=stream)\r\n printer.pprint(object)", "def safe_print(*objs, errors=\"replace\"):\n\tprint(*(to_stdout(str(o), errors) for o in objs))", "def magic_Pprint(self, parameter_s=''):\n \n self.shell.outputcache.Pprint = 1 - self.shell.outputcache.Pprint\n print 'Pretty printing has been turned', \\\n ['OFF','ON'][self.shell.outputcache.Pprint]", "def print_output(tree):\n print_value(tree)\n print_tree(tree)", "def _Print(self, t):\n self.RaiseError(t, \"Print not supported\")", "def vprint(*args, **kwargs ):\n\n forceprint = False\n for key in kwargs:\n if key == \"forceprint\":\n forceprint =kwargs[key]\n \n line = ''\n if debug or forceprint : \n for arg in args:\n line += str(arg) +\" \"\n log = open(exepath + 'pyframe.log', 'a') \n log.write(line + \"\\n\")\n log.close() \n print line", "def eprint(*pargs, **kargs):\n print('\\u001b[31m', end='', file=sys.stderr)\n print(*pargs, file=sys.stderr, **kargs)\n print('\\u001b[0m', end='', file=sys.stderr)", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def _print(self, *args):\n return _ida_hexrays.qstring_printer_t__print(self, *args)", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n 
return s\n\n print pprintStr(self)", "def hook_print():\n sys.stdout = PrintHook()", "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "def print_list(self):\r\n pass", "def debugprint(obj, depth=-1, print_type=False,\r\n file=None, ids='CHAR', stop_on_name=False):\r\n if file == 'str':\r\n _file = StringIO()\r\n elif file is None:\r\n _file = sys.stdout\r\n else:\r\n _file = file\r\n done = dict()\r\n results_to_print = []\r\n order = []\r\n if isinstance(obj, gof.Variable):\r\n results_to_print.append(obj)\r\n elif isinstance(obj, gof.Apply):\r\n results_to_print.extend(obj.outputs)\r\n elif isinstance(obj, Function):\r\n results_to_print.extend(obj.maker.fgraph.outputs)\r\n order = obj.maker.fgraph.toposort()\r\n elif isinstance(obj, (list, tuple)):\r\n results_to_print.extend(obj)\r\n elif isinstance(obj, gof.FunctionGraph):\r\n results_to_print.extend(obj.outputs)\r\n order = obj.toposort()\r\n elif isinstance(obj, (int, long, float, numpy.ndarray)):\r\n print obj\r\n else:\r\n raise TypeError(\"debugprint cannot print an object of this type\", obj)\r\n for r in results_to_print:\r\n debugmode.debugprint(r, depth=depth, done=done, print_type=print_type,\r\n file=_file, order=order, ids=ids,\r\n stop_on_name=stop_on_name)\r\n if file is _file:\r\n return file\r\n elif file == 'str':\r\n return _file.getvalue()\r\n else:\r\n _file.flush()", "def _get_print_fn(file=sys.stdout):\n def _print_fn(op, xin,):\n for attr in op.attrs:\n temp = getattr(xin, attr)\n if callable(temp):\n pmsg = temp()\n else:\n pmsg = temp\n print(op.message, attr, '=', pmsg, file=file)\n return _print_fn", "def test_print1(self):\n writer = StringIO()\n collatz_print(writer, 100, 200, 125)\n self.assertEqual(writer.getvalue(), \"100 200 125\\n\")", "def printOutput(self):\n pass", "def _print(self, *args):\n return _ida_hexrays.cnumber_t__print(self, *args)", "def setPrint():\n (e,d,sr,sw) = codecs.lookup('utf-8')\n unicode_to_utf8 = sw(sys.stdout)\n sys.stdout = unicode_to_utf8", "def pr(string, verbose):\n if(verbose):\n print(string)", "def print(*args, sep=\" \"):\n pass", "def printv(self, *arg):\n if self.verbose:\n print(*arg)", "def print(self):\n\n print(self)", "def _p(self, *args, level=2, **kwargs):\n if self._verbosity >= level:\n print(*args, **kwargs)", "def test_print2(self):\n writer = StringIO()\n collatz_print(writer, 201, 210, 89)\n self.assertEqual(writer.getvalue(), \"201 210 89\\n\")", "def print_pointers(self):\n\n ### FILL IN ###", "def foo_printer(self):\n print(\"\\nHi I'm {}\".format(self.foo))", "def printed(method):\n\t\tdef wrapper(cls, *args):\n\t\t\tif cls.verbose:\n\t\t\t\treturn method(cls, *args)\n\t\treturn wrapper", "def print_me(self, tabs=0, tab=' '):\n pre = tab*tabs\n print(pre+'Producer:')\n print(pre+' produces:', self._produces)\n print(pre+' consumes:', self._consumes)\n print(pre+' transfer:', self._transfer)\n print(pre+' capacity:', self._capacity)", "def _print(cls, quad):\n\t\tprint(\"\\nLIGHT OUTPUT:\\n<<<<{}>>>>\".format(ast.literal_eval(str(cls.get_address_value(quad.result)))))\n\t\tprint(\"END\")\n\n\t\tvar = cls.get_address_value(quad.result)\n\t\tif isinstance(var, collections.Iterable):\n\t\t\tprint(\"DEEP COPY\")\n\t\t\tcls.print_queue.enqueue(copy.deepcopy(var))\n\t\telse:\n\t\t\tcls.print_queue.enqueue(var)", "def printout(*args, **kwargs):\n console_print(sys.stdout, *args, **kwargs)", "def pprint(x):\n if is_theano_object(x):\n return _gettheano().printing.pprint(x)\n else:\n return str(x)", "def PrettyPrint(self):\r\n 
print(self.data)\r\n return", "def print(self):\n self.print_avec_separateur(\" \")", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def pprint_helper(self, angle, indent):\n # just here for defining the interface; work is done in subclasses\n pass", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n param_names = [p for p in params.keys() if p is not \"cost\"]\n param_names.sort()\n\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, name in enumerate(param_names):\n value = params[name]\n if isinstance(value, float):\n this_repr = '%s=%s' % (name, str(value))\n else:\n this_repr = '%s=%s' % (name, printer(value))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' + this_repr[-100:]\n if i > 0:\n if (this_line_length + len(this_repr) >= 75 or '\\n' in this_repr):\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n # options = np.get_printoptions()\n # np.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def init_printing(pretty_print=True, order=None, use_unicode=None):\n if pretty_print:\n stringify_func = lambda arg: pretty(arg, order=order, use_unicode=use_unicode)\n else:\n stringify_func = sstrrepr\n\n try:\n import IPython\n\n ip = IPython.ipapi.get()\n\n if ip is not None:\n def result_display(self, arg):\n \"\"\"IPython's pretty-printer display hook.\n\n This function was adapted from:\n\n ipython/IPython/hooks.py:155\n\n \"\"\"\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)\n\n ip.set_hook('result_display', result_display)\n return\n except ImportError:\n pass\n\n import __builtin__, sys\n\n def displayhook(arg):\n \"\"\"Python's pretty-printer display hook.\n\n This function was adapted from:\n\n http://www.python.org/dev/peps/pep-0217/\n\n \"\"\"\n if arg is not None:\n __builtin__._ = None\n print stringify_func(arg)\n __builtin__._ = arg\n\n sys.displayhook = displayhook", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def _print(self, *args):\n return _ida_hexrays.cinsn_t__print(self, *args)", "def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")", "def sequential_print_statements():\n pass", "def print_post():\n print('| | |'),", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, 
unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def debugprint(r, prefix='', depth=-1, done=None, print_type=False,\r\n file=sys.stdout, print_destroy_map=False,\r\n print_view_map=False, order=None, ids='CHAR',\r\n stop_on_name=False, prefix_child=None):\r\n if depth == 0:\r\n return\r\n\r\n if order is None:\r\n order = []\r\n\r\n if done is None:\r\n done = dict()\r\n\r\n if print_type:\r\n type_str = ' <%s>' % r.type\r\n else:\r\n type_str = ''\r\n\r\n if prefix_child is None:\r\n prefix_child = prefix\r\n\r\n def get_id_str(obj):\r\n if obj in done:\r\n id_str = done[obj]\r\n elif ids == \"id\":\r\n id_str = \"[@%s]\" % str(id(r))\r\n elif ids == \"int\":\r\n id_str = \"[@%s]\" % str(len(done))\r\n elif ids == \"CHAR\":\r\n id_str = \"[@%s]\" % char_from_number(len(done))\r\n elif ids == \"\":\r\n id_str = \"\"\r\n done[obj] = id_str\r\n return id_str\r\n\r\n if hasattr(r.owner, 'op'):\r\n # this variable is the output of computation,\r\n # so just print out the apply\r\n a = r.owner\r\n\r\n r_name = getattr(r, 'name', '')\r\n # normally if the name isn't set, it'll be None, so\r\n # r_name is None here\r\n if r_name is None:\r\n r_name = ''\r\n\r\n if print_destroy_map:\r\n destroy_map_str = str(getattr(r.owner.op, 'destroy_map', ''))\r\n else:\r\n destroy_map_str = ''\r\n\r\n if print_view_map:\r\n view_map_str = str(getattr(r.owner.op, 'view_map', ''))\r\n else:\r\n view_map_str = ''\r\n if destroy_map_str and destroy_map_str != '{}':\r\n destroy_map_str = 'd=' + destroy_map_str\r\n if view_map_str and view_map_str != '{}':\r\n view_map_str = 'v=' + view_map_str\r\n\r\n o = ''\r\n if order:\r\n o = 
str(order.index(r.owner))\r\n already_printed = a in done # get_id_str put it in the dict\r\n id_str = get_id_str(a)\r\n\r\n if len(a.outputs) == 1:\r\n print >> file, '%s%s %s%s \\'%s\\' %s %s %s' % (prefix, a.op,\r\n id_str,\r\n type_str, r_name,\r\n destroy_map_str,\r\n view_map_str,\r\n o)\r\n else:\r\n print >> file, '%s%s.%i %s%s \\'%s\\' %s %s %s' % (prefix, a.op,\r\n a.outputs.index(r),\r\n id_str, type_str,\r\n r_name,\r\n destroy_map_str,\r\n view_map_str,\r\n o)\r\n if not already_printed:\r\n if (not stop_on_name or\r\n not (hasattr(r, 'name') and r.name is not None)):\r\n new_prefix = prefix_child + ' |'\r\n new_prefix_child = prefix_child + ' |'\r\n for idx, i in enumerate(a.inputs):\r\n if idx == len(a.inputs) - 1:\r\n new_prefix_child = prefix_child + ' '\r\n\r\n debugprint(i, new_prefix, depth=depth - 1, done=done,\r\n print_type=print_type, file=file, order=order,\r\n ids=ids, stop_on_name=stop_on_name,\r\n prefix_child=new_prefix_child)\r\n else:\r\n #this is an input variable\r\n id_str = get_id_str(r)\r\n print >> file, '%s%s %s%s' % (prefix, r, id_str, type_str)\r\n\r\n return file", "def bpprint(self, out=None):\n if out is None:\n out = sys.stdout\n print(self.bpformat(), file=out)", "def vprint(expr, **settings):\n\n outstr = vsprint(expr, **settings)\n\n import builtins\n if (outstr != 'None'):\n builtins._ = outstr\n print(outstr)", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n options = numpy.get_printoptions()\n numpy.set_printoptions(precision=5, threshold=64, edgeitems=2)\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, (k, v) in enumerate(sorted(params.items())):\n if isinstance(v, float):\n # use str for representing floating point numbers\n # this way we get consistent representation across\n # architectures and versions.\n this_repr = '%s=%s' % (k, str(v))\n else:\n # use repr of the rest\n this_repr = '%s=%s' % (k, printer(v))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' 
+ this_repr[-100:]\n if i > 0:\n if this_line_length + len(this_repr) >= 75 or '\\n' in this_repr:\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n\n numpy.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def print_(*args, **kwargs):\n fp = kwargs.pop(\"file\", sys.stdout)\n if fp is None:\n return\n\n def write(data):\n if not isinstance(data, basestring):\n data = str(data)\n fp.write(data)\n want_unicode = False\n sep = kwargs.pop(\"sep\", None)\n if sep is not None:\n if isinstance(sep, unicode):\n want_unicode = True\n elif not isinstance(sep, str):\n raise TypeError(\"sep must be None or a string\")\n end = kwargs.pop(\"end\", None)\n if end is not None:\n if isinstance(end, unicode):\n want_unicode = True\n elif not isinstance(end, str):\n raise TypeError(\"end must be None or a string\")\n if kwargs:\n raise TypeError(\"invalid keyword arguments to print()\")\n if not want_unicode:\n for arg in args:\n if isinstance(arg, unicode):\n want_unicode = True\n break\n if want_unicode:\n newline = unicode(\"\\n\")\n space = unicode(\" \")\n else:\n newline = \"\\n\"\n space = \" \"\n if sep is None:\n sep = space\n if end is None:\n end = newline\n for i, arg in enumerate(args):\n if i:\n write(sep)\n write(arg)\n write(end)" ]
[ "0.75577617", "0.73375154", "0.6986672", "0.698475", "0.6944995", "0.692333", "0.6899106", "0.6898902", "0.68146646", "0.6806209", "0.6753795", "0.67497987", "0.6744008", "0.6700308", "0.6691256", "0.6674591", "0.6658083", "0.66091245", "0.6606931", "0.6601862", "0.6563738", "0.6561717", "0.65549695", "0.6494838", "0.6473391", "0.64491546", "0.6411177", "0.6340302", "0.6339321", "0.6335031", "0.6332035", "0.6315847", "0.631272", "0.6297732", "0.62969106", "0.6283717", "0.6279154", "0.6271603", "0.62673396", "0.6265511", "0.62629336", "0.6258366", "0.6258278", "0.62501305", "0.6248315", "0.62459755", "0.6244254", "0.6242083", "0.62393075", "0.62156516", "0.6208198", "0.62068796", "0.62062824", "0.62062824", "0.6194123", "0.6189738", "0.6183852", "0.6183035", "0.61697906", "0.61614454", "0.6160741", "0.61544997", "0.61528033", "0.6150831", "0.6147288", "0.61380607", "0.613793", "0.61300766", "0.61278135", "0.6125416", "0.6114217", "0.61126333", "0.6100682", "0.60998785", "0.6096818", "0.6081694", "0.6076982", "0.6072701", "0.6060028", "0.60581726", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6043662", "0.6037599", "0.60336643", "0.6030174", "0.60290223", "0.60242903", "0.6016989", "0.6004274", "0.60005474", "0.60005474", "0.60003483", "0.599558", "0.59923434", "0.5979316", "0.59777945" ]
0.0
-1
Returns true if both objects are equal
def __eq__(self, other): if not isinstance(other, Dial): return False return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if isinstance(self, other.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def 
__eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n # Ensure same class and values match\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False", "def is_equal(self, a, b):\n return a is b", "def is_equal(self, a, b):\n return a == b", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\r\n if isinstance(other, self.__class__):\r\n return self.__dict__ == other.__dict__\r\n else:\r\n return False", "def is_equal(o1: object, o2: object) -> bool:\n if o1 is None and o2 is None:\n return True\n if o1 is None:\n return False\n return o1 == o2", "def __eq__(self,other):\n return self is other", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]", "def __eq__(self, other):\n return type(self) == type(other) and self.id == other.id", "def __eq__(self, other) -> bool:\n if json.dumps(self.data,sort_keys=True) == json.dumps(other.data,sort_keys=True):\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Single2HaObject):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__hash__() == other.__hash__()\n return False", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if self.primary != other.primary:\n return False\n return True", "def __eq__(self, other) -> bool:\n if other is None:\n return False\n return self.__hash__() == other.__hash__()", "def __eq__(self, other):\n if not isinstance(other, ObjectInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def __eq__(self, other):\n return id(self) == id(other)", "def __eq__(self, other) -> bool:\n return type(self) == type(other) and \\\n self._id == other.id and \\\n self.code == other.code and \\\n self.name == other.name and \\\n self.gender == other.gender and \\\n self.date_of_birth == other.date_of_birth", "def equals(self, other): # -> bool:\n ...", "def equals(self, obj: object) -> bool:\n ...", "def __eq__(self, other):\n for attr in self._attrs_to_save:\n try:\n if getattr(self, attr) != getattr(other, attr):\n return False\n except AttributeError:\n return False\n return True", "def __eq__(self, other):\n if type(other) is type(self):\n return (self.x == other.x and self.y == other.y and self.z == other.z)\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n return False", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, self.__class__):\n return NotImplemented\n\n return (\n self.name,\n self.submit_at,\n self.subreddit,\n self.title,\n self.body_template,\n ) == (\n other.name,\n other.submit_at,\n other.subreddit,\n other.title,\n other.body_template,\n )", "def __eq__(self, other):\n # Check that we share 
the same class as this object\n if not isinstance(other, type(self)):\n return False\n\n return hash(self) == hash(other)", "def __eq__(self, other):\n if not isinstance(other, PreviewObjectAutofill):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return equal(self, other)", "def __eq__(self, other: Any) -> bool:\n return self.__class__ is other.__class__ and self.identifier == other.identifier", "def __eq__(self, other):\n return self.__id == other.get_id()", "def __eq__ (self, other):\n if type(self) == type(other):\n return self._m == other._m\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Referent):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.properties == other.properties", "def __eq__(self, other):\n return self.items() == other.items()", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, other):\n\n if self is other:\n return True\n return hash(self) == hash(other)", "def __eq__(self, other):\n if other._field1 == self._field1:\n return True\n return False", "def same_as(self, other):\n return super().__eq__(other)", "def __eq__(self, other):\n try:\n return other and \\\n self.id == other.id\n\n except AttributeError:\n return False", "def __eq__(self, other):\r\n\t\treturn self._to_pylist() == other._to_pylist()", "def __eq__(self, other):\n if not isinstance(other, Fiddle):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.808826", "0.808826", "0.8054592", "0.79827315", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.79666996", "0.7961202", "0.7961202", "0.7942976", "0.79306227", "0.7926644", "0.78975934", "0.7882431", "0.7882431", "0.7880762", "0.78725857", "0.7868152", "0.78663856", "0.78261256", "0.78199595", "0.7815964", "0.7807595", "0.7807073", "0.7796496", "0.77950644", "0.7784531", "0.77789015", "0.776954", "0.7753856", "0.7746045", "0.77404314", "0.7728747", "0.7725688", "0.7719157", "0.7703603", "0.7686132", "0.7676931", "0.7673879", "0.7665025", "0.7655733", "0.7655552", "0.76282173", "0.7625676", "0.7624168", "0.76236314", "0.76236314", "0.76236314", "0.7616841", "0.7600565", "0.75995135", "0.75960314", "0.75950587", "0.7593984", "0.75895596" ]
0.0
-1
Returns true if both objects are not equal
def __ne__(self, other): return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n return not self.__ne__(other)", "def __ne__(self, other):\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__ (self, other):\n return not self == other" ]
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
Provide video codec vcodec = "h264" acodec = "copy" extra = "" split_cmd = "ffmpeg i '%s' vcodec %s acodec %s y %s" % (file_path, vcodec, acodec, extra) s_cmd = " i '%s' vcodec %s acodec %s"%(file_path, vcodec, acodec)
def split_video_random(file_path, start_pos, split_length, out_path): s_cmd = " -i '%s'"%(file_path) #use default CODEC try: fileext = file_path.split(".")[-1] except IndexError as e: raise IndexError("No ext. in filename. Error: " + str(e)) split_start = start_pos split_length = split_length head, tail = os.path.split(file_path) name, ext = tail.split('.') filebase=name+'_'+str(start_pos)+'-'+str(split_length) dstfilebase = out_path + '/' + filebase # create output file base #split_str = "" #split_str += " -ss " + str(split_start) + " -t " + str(split_length) + " '"+ dstfilebase + "." + fileext + "'" s_str = "" #s_str += "ffmpeg"+" -ss "+str(split_start)+" -t "+str(split_length) + s_cmd + " '"+dstfilebase + "." + fileext + "'" s_str += "ffmpeg" + " -ss " + str(split_start) + s_cmd + " -t " + str(split_length) + " '"+ dstfilebase + "." + fileext + "'" print("########################################################") #print "About to run: "+split_cmd+split_str print("About to run: "+s_str) print("########################################################") #output = subprocess.Popen(split_cmd+split_str, shell = True, stdout = subprocess.PIPE).stdout.read() output = subprocess.Popen(s_str, shell=True, stdout=subprocess.PIPE).stdout.read()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_by_manifest(filename, manifest, vcodec=\"copy\", acodec=\"copy\",\n extra=\"\", **kwargs):\n if not os.path.exists(manifest):\n raise SystemExit\n\n with open(manifest) as manifest_file:\n manifest_type = manifest.split(\".\")[-1]\n if manifest_type == \"json\":\n config = json.load(manifest_file)\n elif manifest_type == \"csv\":\n config = csv.DictReader(manifest_file)\n else:\n raise SystemExit\n\n split_cmd = \"ffmpeg -i '%s' -vcodec %s -acodec %s -y %s\" % (filename,\n vcodec,\n acodec,\n extra)\n split_count = 1\n split_error = []\n try:\n fileext = filename.split(\".\")[-1]\n except IndexError as e:\n raise IndexError(\"No . in filename. Error: \" + str(e))\n for video_config in config:\n split_str = \"\"\n try:\n split_start = video_config[\"start_time\"]\n split_length = video_config.get(\"end_time\", None)\n if not split_length:\n split_length = video_config[\"length\"]\n filebase = video_config[\"rename_to\"]\n if fileext in filebase:\n filebase = \".\".join(filebase.split(\".\")[:-1])\n\n split_str += \" -ss \" + str(split_start) + \" -t \" + \\\n str(split_length) + \\\n \" '\"+ filebase + \".\" + fileext + \\\n \"'\"\n output = subprocess.Popen(split_cmd+split_str,\n shell = True, stdout =\n subprocess.PIPE).stdout.read()\n except KeyError as e:\n raise SystemExit", "def check_video_format(movie_file, desired_format='.mp4', original_format='.avi'):\n\n if not os.path.isfile(movie_file+original_format):\n print 'Error. avi file does not exist:'+movie_file+'.avi'\n if not os.path.isfile(movie_file+desired_format):\n cmd = ['ffmpeg']\n cmd += ['-i', movie_file+original_format]\n cmd += [movie_file+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()", "def _transcode_ffmpeg_args(mpeg_filename, mp4_filename, res):\n\n \"\"\"\n 697 ffmpeg -i Chef\\ Wanted\\ With\\ Anne\\ Burrell\\:\\ \\\"The\\ Re-Launch\\\".mpg\n -strict experimental -acodec aac -ac 2 -ab 160k -s 960x540 -vcodec libx264\n -vpre iPod640 -b 1200k -f mp4 -threads 0 chef.conversionmatrixsettings.mp4\n \"\"\"\n return [FFMPEG, \"-i\", mpeg_filename, \"-strict\", \"experimental\",\n \"-acodec\", \"aac\", \"-ac\", \"2\", \"-ab\", \"160k\", \"-s\", res,\n \"-vcodec\", \"libx264\", \"-vpre\", \"iPod640\", \"-b\", \"1200k\",\n \"-f\", \"mp4\", \"-threads\", \"0\", mp4_filename]", "def extract_vob(in_vob, guid):\n\t#Detect interlacing.\n\tmediainfo_command = \"mediainfo --Inform='Video;%ScanType%,%ScanOrder%' \" + in_vob\n\tprint(mediainfo_command)\n\tprocess = subprocess.Popen(mediainfo_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0:\n\t\traise Exception(\"Calling Mediainfo on {in_vob} failed with exit code {exit_code}.\".format(in_vob=in_vob, exit_code=exit_code))\n\tmediainfo_parts = cout.decode(\"utf-8\").split(\",\")\n\tis_interlaced = mediainfo_parts[0] == \"Interlaced\"\n\tfield_order = mediainfo_parts[1].lower().strip()\n\tprint(\"Interlace detection:\", is_interlaced, field_order, \"(\", mediainfo_parts, \")\")\n\n\tffmpeg_command = [\"ffmpeg\", \"-i\", in_vob]\n\tprint(ffmpeg_command)\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\tprocess.wait() #Ignore the exit code. 
It always fails.\n\tvobinfo = cerr.decode(\"utf-8\")\n\ttracks = []\n\tfor match in re.finditer(r\" Stream #0:(\\d+)\\[0x[0-9a-f]+\\]: (\\w+): ([^\\n]+)\", vobinfo):\n\t\ttrack_nr = match.group(1)\n\t\ttrack_type = match.group(2)\n\t\ttrack_codec = match.group(3)\n\t\tnew_track = track.Track()\n\t\tnew_track.from_vob(track_nr, track_type, track_codec, is_interlaced, field_order)\n\t\tnew_track.file_name = guid + \"-T\" + str(new_track.track_nr) + \".\" + new_track.codec\n\t\tif new_track.type != \"unknown\":\n\t\t\ttracks.append(new_track)\n\n\t#Generate the parameters to pass to ffmpeg.\n\ttrack_params = [\"-i\", in_vob]\n\tfor track_metadata in tracks:\n\t\ttrack_params.append(\"-map\")\n\t\ttrack_params.append(\"0:\" + str(track_metadata.track_nr))\n\t\ttrack_params.append(\"-c\")\n\t\ttrack_params.append(\"copy\")\n\t\ttrack_params.append(track_metadata.file_name)\n\n\t#Extract all tracks.\n\tprint(\"---- Extracting tracks...\")\n\tffmpeg(*track_params)\n\n\treturn tracks", "def write_video_ffmpeg(\n itr: Iterator[np.ndarray],\n out_file: str | Path,\n fps: int = 30,\n out_fps: int = 30,\n vcodec: str = \"libx264\",\n input_fmt: str = \"rgb24\",\n output_fmt: str = \"yuv420p\",\n quite=False\n) -> None:\n\n first_img = next(itr)\n height, width, _ = first_img.shape\n\n stream = ffmpeg.input(\"pipe:\", format=\"rawvideo\", pix_fmt=input_fmt, s=f\"{width}x{height}\", r=fps)\n stream = ffmpeg.output(stream, str(out_file), pix_fmt=output_fmt, vcodec=vcodec, r=out_fps)\n if quite:\n stream = stream.global_args('-loglevel', 'quiet')\n stream = ffmpeg.overwrite_output(stream)\n stream = ffmpeg.run_async(stream, pipe_stdin=True)\n\n def write_frame(img: np.ndarray) -> None:\n stream.stdin.write(as_uint8(img).tobytes())\n\n # Writes all the video frames to the file.\n write_frame(first_img)\n for img in itr:\n write_frame(img)\n\n stream.stdin.close()\n stream.wait()\n print('Done.')", "def reencode(filepath, loglevel='panic'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. 
import manually and fix errors')\n raise\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n # re encode video without b frame and as mp4\n basename, ext = os.path.splitext(filepath)\n output_filepath = os.path.join(basename, os.path.basename(filepath).replace(ext, '.mp4'))\n if not os.path.isdir(os.path.dirname(output_filepath)):\n os.makedirs(os.path.dirname(output_filepath))\n try:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_filepath,\n **{'x264opts': 'bframes=0',\n 'f': 'mp4'})\n ffmpeg.overwrite_output(stream).run()\n except Exception as e:\n logger.exception('ffmpeg error in disassemble:')\n raise\n\n output_probe = Videos.get_info(output_filepath)\n start_time = eval(output_probe['streams'][0]['start_time'])\n fps = eval(output_probe['streams'][0]['avg_frame_rate'])\n has_b_frames = output_probe['streams'][0]['has_b_frames']\n start_frame = fps * start_time\n if start_time != 0:\n logger.warning('Video start_time is not 0!')\n if has_b_frames != 0:\n logger.warning('Video still has b frames!')\n return output_filepath", "def loadVideo( iFileName, iFrameSize = (576, 720) ):\n import sys\n import subprocess as sp\n # ustvari klic ffmpeg in preusmeri izhod v cevovod\n command = [ 'ffmpeg',\n '-i', iFileName,\n '-f', 'image2pipe',\n '-pix_fmt', 'rgb24',\n '-vcodec', 'rawvideo', '-']\n pipe = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)\n # definiraj novo spremeljivko\n oVideo = np.array([])\n iFrameSize = np.asarray( iFrameSize )\n frameCount = 0\n # zacni neskoncno zanko\n while True:\n frameCount += 1\n# print( 'Berem okvir %d ...' % frameCount )\n print(\"\\rBerem okvir %d ...\" % frameCount, end=\"\")\n # preberi Y*X*3 bajtov (= 1 okvir)\n raw_frame = pipe.stdout.read(np.prod(iFrameSize)*3)\n # pretvori prebrane podatke v numpy polje\n frame = np.fromstring(raw_frame, dtype='uint8') \n # preveri ce je velikost ustrezna, sicer prekini zanko\n if frame.size != (np.prod(iFrameSize)*3):\n print(\" koncano!\\n\")\n break;\n # preoblikuj dimenzije in pretvori v sivinsko sliko\n frame = colorToGray( frame.reshape((iFrameSize[0],iFrameSize[1],3)) )\n # sprazni medpomnilnik \n pipe.stdout.flush() \n # vnesi okvir v izhodno sprememnljivko\n if oVideo.size == 0:\n oVideo = frame\n oVideo = oVideo[...,None]\n else:\n oVideo = np.concatenate((oVideo,frame[...,None]), axis=2)\n # zapri cevovod\n pipe.terminate()\n # vrni izhodno spremenljivko\n return oVideo", "def __init__(self,vid_path:str,num_frames:int=None,vid_flow_direction:str='left'):\n \n self.num_frames=num_frames\n if vid_path.split('.')[-1]=='cine' or vid_flow_direction!='left':\n #This is a cine file or needs to be rotated, convert to mp4\n print('Converting .cine file to mp4 (lossless)')\n #detect platform so we can correct file paths for ffmpeg\n is_win=re.compile('.*[Ww]in.*')\n if is_win.match(sys.platform):\n corrected_vid_path='\"'+vid_path+'\"'\n else:\n #Put escape characters in front of spaces in file name\n corrected_vid_path=[]\n for c in vid_path:\n if c==' ':\n corrected_vid_path.append('\\\\')\n corrected_vid_path.append(c)\n corrected_vid_path=''.join(corrected_vid_path)\n if vid_flow_direction=='up':\n rotate='-vf \"transpose=2\" '\n elif vid_flow_direction=='left':\n rotate=''\n elif vid_flow_direction=='right':\n rotate='-vf \"transpose=2,transpose=2\" '\n else:\n raise Exception(\"vid_flow_direction must be 'up', 'left' or 'right'\")\n if num_frames!=None:\n frames='-frames:v {0} '.format(num_frames)\n else:\n frames=''\n 
os_handle,new_file_path=tempfile.mkstemp(suffix='.mp4')\n #close file, we don't work with it directly\n os.close(os_handle)\n ffmpeg_command='ffmpeg -y -i {orig_file} {frames}{rotate}-f mp4 -crf 0 {new_file}'.format(orig_file=corrected_vid_path,rotate=rotate,new_file=new_file_path,frames=frames)\n print(ffmpeg_command)\n list(os.popen(ffmpeg_command))\n self.vid_path=new_file_path\n self.delete_file=True\n stats=os.stat(new_file_path)\n if stats.st_size==0:\n raise Exception('File conversion failed, check that ffmpeg is on PATH')\n else:\n #Not a cine\n self.vid_path=vid_path\n self.delete_file=False", "def avi2mpg(filename):\n assert filename.endswith('.avi')\n ofile = '%s.mpg' % os.path.splitext(filename)[0]\n run_shell_cmd('ffmpeg -y -i %s -qscale:v 1 %s' % (filename, ofile), ignore=True)\n return ofile", "def mpg2avi(filename):\n assert filename.endswith('.mpg')\n ofile = '%s.avi' % os.path.splitext(filename)[0]\n run_shell_cmd('ffmpeg -y -i %s -qscale:v 2 %s' % (filename, ofile), ignore=True)\n return ofile", "def makeVideo():\n os.system(\"cd video && ffmpeg -r 10 -i img%05d.jpg -vcodec mpeg4 -y caronthehill_clip.mp4\")", "def splice_clips(list_file, output_file):\n cmd = ['ffmpeg']\n cmd += ['-f', 'concat']\n cmd += ['-i', list_file]\n cmd += ['-c', 'copy']\n cmd += ['-y']\n cmd += [output_file + '_clips.mp4']\n #cmd += ['> /dev/null 2>&1 < /dev/null'] \n\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n print \"Splicing clips: \", cmd_string\n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE)#, stderr=subprocess.PIPE)\n p.wait()", "def transcodetomp4(file_in, logger):\n\n import subprocess\n\n file_out = file_in.replace('.mkv', '.mp4')\n\n if os.path.isfile('/usr/bin/avconv'):\n\n convert_command = 'su securityspy -c \\\"/usr/bin/avconv -i \"{}\" -f mp4 -vcodec copy -acodec '.format(file_in) + \\\n 'libfaac -b:a 112k -ac 2 -y \"{}\"'.format(file_out) + \"\\\"\"\n\n try:\n subprocess.check_call(convert_command, shell=True)\n except subprocess.CalledProcessError:\n logger.error(\"The command to transcode: {} --- failed...\".format(convert_command))\n return file_in\n\n return file_out\n else:\n return file_in\n # fin", "def make_video(data,\n xdim, ydim, sample_read_rows, sample_read_cols, image_write_rows, image_write_cols,\n directory, filename, fps = 24.0, start_frame = 1, end_frame = None, timestamp = False, fontsize = 30, ts_pos = (0,0), save_raw = False):\n\n #Command to send via the command prompt which specifies the pipe parameters\n # command = ['ffmpeg',\n # '-y', # (optional) overwrite output file if it exists\n # '-f', 'image2pipe',\n # '-vcodec', 'mjpeg', #'mjpeg',\n # '-r', '1',\n # '-r', str(fps), # frames per second\n # '-i', '-', # The input comes from a pipe\n # '-an', # Tells FFMPEG not to expect any audio\n # '-vcodec', 'mpeg4',\n # '-b:v', '5000k',\n # directory + filename + \"/\"+filename+\".mp4\",\n # '-hide_banner',\n # '-loglevel', 'panic']\n\n # Create directories if they don't exist\n if not os.path.exists(os.path.join(directory, filename, 'frames/')):\n os.makedirs(os.path.join(directory, filename, 'frames/'))\n if save_raw and not os.path.exists(os.path.join(directory, filename, 'frames-raw/')):\n os.makedirs(os.path.join(directory, filename, 'frames-raw/'))\n\n if end_frame == None:\n end_frame = data.FrameCount\n\n cm = colormap.get_cmap('viridis')\n\n for i, frame_offset in enumerate(tqdm.tqdm(range(start_frame, end_frame))):\n frame = FrameRead(data, frame_offset)\n frame_image = np.zeros([ydim, 
xdim], dtype=np.uint8)\n frame_image[image_write_rows, image_write_cols] = frame.frame_data[sample_read_rows, sample_read_cols]\n\n rgb_im = Image.fromarray(cm(frame_image, bytes=True)).convert('RGB')\n rgb_im.save(os.path.join(directory, filename, 'frames/', f'{i}.jpg'), 'JPEG')\n\n if save_raw:\n Image.fromarray(np.uint8(frame.frame_data), mode='L').save(os.path.join(directory, filename, 'frames-raw/', f'{i}.jpg'), 'JPEG')", "def prepare_video(path_to_video: str, number_of_images=87) -> None:\n\n temp_video = path.join(path_to_video, 'temp_outpy.mp4')\n video = path.join(path_to_video, 'outpy.h264')\n\n # create mp4 video for metadata and compute video duration\n subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])\n result = subprocess.run([\"ffprobe\", \"-v\", \"error\", \"-show_entries\",\n \"format=duration\", \"-of\",\n \"default=noprint_wrappers=1:nokey=1\", temp_video],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n video_duration = float(result.stdout)\n\n # create images folder\n path_to_images = path.join(path_to_video, 'images')\n if path.exists(path_to_images) and path.isdir(path_to_images):\n shutil.rmtree(path_to_images)\n makedirs(path_to_images)\n\n # split the given video into images\n subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2',\n path.join(path_to_images, 'image%d.jpg')])\n\n # remove extra files\n remove_extra_images(path_to_images, number_of_images)\n remove(temp_video)", "def mergeVideos(filename):\n #all possible names\n filenames=[filename+\"_vp8.webm\",filename+\"_vp9.webm\",filename+\"_h265.mp4\",filename+\"_av1.mkv\"]\n #output name\n output=filename+\"_merged.mp4\"\n #command\n cmd = [\n \"ffmpeg\",\n \"-i\",\n filenames[0],\n \"-i\",\n filenames[1],\n \"-i\",\n filenames[2],\n \"-i\",\n filenames[3],\n \"-filter_complex\",\n '\"[0:v][1:v]hstack[top]; \\#stack videos\n [2:v][3:v]hstack[bottom]; \\\n [top][bottom]vstack,format=yuv420p[v]; \\\n [0:a][1:a][2:a][3:a]amerge=inputs=4[a]\"',\n \"-map\",\n '\"[v]\"',\n \"-map\",\n '\"[a]\"',\n \"-ac\",\n \"2\",\n output\n ]\n # to convert a list to string we use join\n separator = \" \"\n com = separator.join(cmd)\n # use the command\n os.system(com)", "def check_video_timestamps(movie_file, desired_format='.mp4', desired_framerate=30):\n\n check_video_format(movie_file, desired_format='.mp4', original_format='.avi')\n\n new_movie_file = movie_file+'_tt'+desired_format\n if not os.path.isfile(new_movie_file):\n #Convert file to 30 fps\n cmd = ['ffmpeg', '-i', movie_file+desired_format]\n cmd += ['-r', str(desired_framerate)]\n cmd += ['-y', movie_file+'_t'+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd]) \n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()\n\n #Add timecode text to video\n cmd = 'ffmpeg -i '+movie_file+'_t'+desired_format+' -vf drawtext=\\\"fontfile=/opt/X11/share/fonts/TTF/VeraMoBd.ttf: timecode=\\'00\\:00\\:00\\:00\\':rate=30: fontcolor=white@0.8: x=7: y=460\\\" -an -y '+movie_file+'_tt'+desired_format\n args = shlex.split(cmd)\n #print args\n p = subprocess.Popen(args, shell=False)\n p.wait()\n\n os.remove(movie_file+'_t'+desired_format)\n\n return new_movie_file", "def _spawn_ffmpeg(self) -> None:\r\n if self.ffmpeg_proc is not None:\r\n raise RuntimeError('_spawn_ffmpeg called when ffmpeg_proc is '\r\n + f'{self.ffmpeg_proc} (not None)')\r\n\r\n args = ['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo',\r\n '-s', 
f'{self.frame_size[0]}x{self.frame_size[1]}',\r\n '-pix_fmt', 'rgba', '-r', str(self.fps),\r\n '-loglevel', 'quiet',\r\n '-i', 'pipe:0',\r\n '-vcodec', 'h264', '-pix_fmt', 'yuv420p',\r\n '-movflags', '+faststart']\r\n\r\n if self.bitrate > 0:\r\n args.extend(['-b', f'{self.bitrate}k'])\r\n args.extend(['-y', self.outfile])\r\n\r\n create_flags = sp.CREATE_NO_WINDOW if 'nt' in os.name else 0\r\n self.ffmpeg_proc = sp.Popen(args, shell=False, stdout=None, stderr=None,\r\n stdin=sp.PIPE, creationflags=create_flags)", "def start_recording(codec, filename=time.strftime(\"%Y-%m-%d_%H-%M-%S\")):\n global video_writer\n folder = 'video_out/' # eventually replace this with the SD card folder\n # TODO: also include branch name and/or commit ID\n path = folder + filename + '.' + filetype\n print \"Saving video to: %s\" % path\n\n height = videoinput.frame_height\n if settings.sidebyside:\n width = 2*videoinput.frame_width\n else:\n width = videoinput.frame_width\n\n try:\n video_writer = cv2.VideoWriter(path, codec, 30, (width, height))\n except:\n print \"Failed to open video file for writing!\"", "def ffmpeg(*options):\n\tffmpeg_command = [\"ffmpeg\"] + list(options)\n\tprint(\"Calling FFMPEG:\", \" \".join(ffmpeg_command))\n\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"Calling FFmpeg failed with exit code {exit_code}. CERR: {cerr} . COUT: {cout}\".format(exit_code=exit_code, cerr=str(cerr), cout=str(cout)))", "def save_video(foldername, songname, songlen, num_steps, output):\n num_steps_by_len = num_steps / songlen\n p = subprocess.Popen(['ffmpeg', '-f', 'image2', '-r', str(num_steps_by_len), '-i', '%d.png', '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-vf', 'pad=ceil(iw/2)*2:ceil(ih/2)*2', 'movie.mp4'], cwd=foldername)\n p.wait()\n\n p = subprocess.Popen(['ffmpeg', '-i', 'movie.mp4', '-i', '../audio_files/' + songname + '.mp3', '-map', '0:v', '-map', '1:a', '-c', 'copy', output], cwd=foldername)\n p.wait()", "def make_video(input_files, width=0, height=0, frame_rate=24, crf=20, output_path=\"video.mp4\"):\n if isinstance(input_files, list):\n from PIL import Image # pylint: disable=C0415\n\n with Image.open(input_files[0]) as img:\n width, height = img.size\n tmp_dir = \"tmp_ffmpeg_dir\"\n os.mkdir(tmp_dir)\n if width % 2 != 0:\n print(f\"Width ({width}) not divisible by 2\")\n width -= 1\n if height % 2 != 0:\n print(f\"Height ({width}) not divisible by 2\")\n height -= 1\n for i, inp in enumerate(input_files):\n shutil.copy(inp, os.path.join(tmp_dir, f\"{i:06d}.png\"))\n inputs = f\"{tmp_dir}/%06d.png\"\n command = ffmpeg_common_args(frame_rate, inputs, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n for i in range(len(input_files)):\n os.remove(os.path.join(tmp_dir, f\"{i:06d}.png\"))\n os.rmdir(tmp_dir)\n elif isinstance(input_files, str):\n assert width != 0 and height != 0\n command = ffmpeg_common_args(frame_rate, input_files, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n else:\n assert (\n False\n ), f'input_files should be list (of files) or str (of file template, e.g., \"%04d.png\") instead of {type(input_files)}'", "def make_video(pattern, plotdir, moviedir, movienametag):\n images_list = glob('%s/%s'%(plotdir, pattern))\n images_list.sort()\n # save all required files into 
tmp_moviedir, with simple filenames: %.4d.png\n tmp_moviedir = '%s/tmp_movie_%s'%(plotdir, movienametag)\n os.system('mkdir -p %s'%tmp_moviedir)\n for i in range(len(images_list)):\n fname = images_list[i].split('%s/'%plotdir)[-1].split('.png')[0]\n os.system('cp %s/%s.png %s/%.4d.png'%(plotdir, fname, tmp_moviedir, i))\n\n os.system('avconv -i %s'%tmp_moviedir +'/%04d.png ' \\\n +' -y -c:v libx264 -pix_fmt yuv420p %s/%s.mp4'%(moviedir, movienametag))", "def convert_to_mp3(filename: str, title: str, start: int=None, end: int=None) -> list:\n\t# setup args for ffmpeg\n\tfile_a = f\"{path_to_wrk_dir}{filename}.mp4\" # input file\n\tfile_b = f\"{path_to_wrk_dir}{title}.mp3\" # output file\n\tfiles_b = [] # this list need if file more than 30 mb\n\targs = [\n\t\t\"/usr/bin/ffmpeg\", # path to ffmpeg\n\t\t\"-i\", # flag for input file\n\t\tfile_a, # input file\n\t\t\"-acodec\", # setup codec\n\t\t\"libmp3lame\", # codec name\n\t\t]\n\n\t# now need setup timings for target encode\n\tif start is not None and start != 0:\n\t\targs = args + [\"-ss\", str(start)]\n\tif end is not None and end != 0:\n\t\targs = args + [\"-t\", str(end - start)]\n\n\t# and last part for args to ffmpeg\n\targs = args + [\n\t\t\"-metadata\", # setup metadata for file\n\t\tf\"title={title}\", # title\n\t\t\"-metadata\",\n\t\tf\"artist={title}\", # and artist\n\t\t\"-b:a\", # setup bitrate\n\t\t\"320k\", # setup max bitrate\n\t\tfile_b,\n\t\t]\n\tprint(f\"{args}\")\n\t# start subprocess for encoding\n\tpopen = subprocess.Popen(args)\n\tpopen.wait()\n\n\t# check size file. if he more than 30 mb, bot need split him to chunks.\n\tsize = getsize(file_b) / 1024 / 1024\n\tif size > 30 and ( start or end is None ):\n\t\t# setup args for split to chunks\n\t\targs = [\n\t\t\t\"ffprobe\",\n\t\t\t\"-show_entries\",\n\t\t\t\"format=duration\",\n\t\t\t\"-i\",\n\t\t\tfile_b,\n\t\t\t]\n\n\t\t# get duration video.\n\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\tpopen.wait()\n\t\toutput = popen.stdout.read()\n\t\t# now we know how long this audio file\n\t\t# split to 10 min chunks\n\t\tdur = re.findall(r\"\\d{1,10}\", str(output))\n\t\t# get chunks count for loop\n\t\tcount_chunks = (int(dur[0]) // 600) + 1\n\t\tfor chunk_start_time in range(0, count_chunks):\n\t\t\t# setup args for split\n\t\t\t# big parts of args the same for encode\n\t\t\targs = [\n\t\t\t\t\"/usr/bin/ffmpeg\",\n\t\t\t\t\"-i\",\n\t\t\t\tfile_b,\n\t\t\t\t\"-ss\",\n\t\t\t\tf\"{chunk_start_time * 600}\", # when start chunk\n\t\t\t\t\"-t\",\n\t\t\t\t\"600\", # 10 mints duration\n\t\t\t\t\"-acodec\",\n\t\t\t\t\"copy\", # copy\n\t\t\t\t\"-b:a\",\n\t\t\t\t\"320k\",\n\t\t\t\tf\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\", # now we have path to video with chunk number.\n\t\t\t]\n\t\t\ttry:\n\t\t\t\t# start process for cut chunk\n\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\tpopen.wait()\n\t\t\t# handle except.\n\t\t\texcept Exception as e:\n\t\t\t\tprint(f\"Exception - {e}\")\n\t\t\tfiles_b.append(f\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\") # append name of file in list\n\t\tremove(file_b)\n\ttry:\n\t\t# remove tmp file\n\t\tremove(file_a)\n\t# handle except\n\texcept FileNotFoundError:\n\t\tfiles = get_file_list(path_to_wrk_dir)\n\t\tfor i in files:\n\t\t\tif -1 != f\"{path_to_wrk_dir}{i}\".find(f\"{filename}\") and f\"{i}\".find(f\".mp3\") == -1:\n\t\t\t\ttry:\n\t\t\t\t\tremove(f\"{path_to_wrk_dir}{i}\")\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tprint(f\"can't remove file {path_to_wrk_dir}{i}\")\n\tif len(files_b) 
== 0:\n\t\treturn [file_b]\n\telse:\n\t\treturn files_b", "def seqIo_toVid(fName, ext='avi'):\n\n assert fName[-3:]=='seq', 'Not a seq file'\n sr = seqIo_reader(fName)\n N = sr.header['numFrames']\n h = sr.header['height']\n w = sr.header['width']\n fps = sr.header['fps']\n\n out = fName[:-3]+ext\n sw = skvideo.io.FFmpegWriter(out)\n # sw = cv2.VideoWriter(out, -1, fps, (w, h))\n timer = pb.ProgressBar(widgets=['Converting ', pb.Percentage(), ' -- ',\n pb.FormatLabel('Frame %(value)d'), '/',\n pb.FormatLabel('%(max)d'), ' [', pb.Timer(), '] ',\n pb.Bar(), ' (', pb.ETA(), ') '], maxval=N)\n\n for f in range(N):\n I, ts = sr.getFrame(f)\n sw.writeFrame(Image.fromarray(I))\n # sw.write(I)\n timer.update(f)\n timer.finish()\n # cv2.destroyAllWindows()\n # sw.release()\n sw.close()\n sr.close()\n print(out + ' converted')", "def write_video(frames, filename, fps=20):\n \n # On Mac systems, copy ffmeg binaries to your PATH (http://ffmpegmac.net/)\n \n if platform.system() == 'Windows':\n err_str = 'Don\\'t know how to write a movie for %s platform' % platform.system()\n raise NotImplementedError(err_str)\n\n \n if len(frames.shape) == 4:\n pix_fmt = 'rgb24'\n else:\n pix_fmt = 'gray'\n \n # normalize\n max_pix_val = np.percentile(frames, 99.9)\n if frames.dtype in (np.bool, bool):\n frames = frames.astype(np.uint8)\n frames -= frames.min()\n frames[frames>max_pix_val] = max_pix_val\n if max_pix_val > 0:\n frames *= 255. / max_pix_val\n frames = frames.astype(np.uint8)\n \n # figure out which av program is installed\n program_name = ''\n try:\n subprocess.check_call(['avconv', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'avconv'\n except OSError:\n try:\n subprocess.check_call(['ffmpeg', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'ffmpeg'\n except OSError:\n pass\n if not program_name:\n raise OSError('Can\\'t find avconv or ffmpeg')\n \n # prepare pipe to av converter program\n size_str = '%ix%i' % (frames.shape[1], frames.shape[2])\n cmd = [program_name,\n '-y', # (optional) overwrite output file if it exists\n '-f', 'rawvideo',\n '-vcodec','rawvideo',\n '-s', size_str, # size of one frame\n '-pix_fmt', pix_fmt,\n '-r', str(fps), # frames per second\n '-i', '-', # input comes from a pipe\n '-an', # no audio\n '-qscale', '1',\n '-vcodec','mjpeg',\n filename]\n \n pipe = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=DEVNULL, stderr=subprocess.STDOUT)\n \n # write frames \n for frame in frames:\n frame = np.fliplr(frame)\n pipe.stdin.write(frame.tostring())\n pipe.stdin.close()\n pipe.wait()", "def convert_files(enumerated_src_file):\n i, src_file = enumerated_src_file\n src_file = src_file.strip()\n file_extension, acodec, quality = audio_codec()\n\n dst_file = '.'.join(src_file.split('.')[:-1]) + file_extension\n sys.stdout.write(str(i + 1) + ': ' + src_file + ' -> ' + dst_file + '\\n')\n subprocess.call(['ffmpeg', '-i', src_file, '-vn', '-acodec',\n acodec, '-aq', quality, dst_file, '-loglevel', 'quiet'])\n return src_file", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def main():\n 
print(\"This is a library for reading video sequences into python via ffmpeg. \")\n print(\"Provides the 'Video_Reader' iterator class. \")\n print(\"Requires ffmpeg be installed. \")", "def transcode(self) -> None:\n # Get source mediainfo to use in validation\n source_media_info = self.get_media_info(self.source)\n\n # Common ffmpeg flags\n ff = FFMPEG(overwrite=True, loglevel='repeat+level+info')\n # Init source file\n ff < SourceFile(self.source)\n # Scaling\n fc = ff.init_filter_complex()\n fc.video | Scale(**TRANSCODING_OPTIONS[SCALE]) | fc.get_video_dest(0)\n\n # set group of pixels length to segment size\n gop = math.floor(source_media_info[VIDEO_FRAME_RATE] * GOP_DURATION)\n # preserve source audio sampling rate\n arate = source_media_info[AUDIO_SAMPLING_RATE]\n # preserve original video FPS\n vrate = source_media_info[VIDEO_FRAME_RATE]\n # codecs, muxer and output path\n\n cv0 = VideoCodec(\n gop=gop,\n vrate=vrate,\n **TRANSCODING_OPTIONS[VIDEO_CODEC])\n ca0 = AudioCodec(\n arate=arate,\n **TRANSCODING_OPTIONS[AUDIO_CODEC])\n out0 = Muxer(self.destination, format='mp4')\n\n # Add output file to ffmpeg\n ff.add_output(out0, cv0, ca0)\n\n # Run ffmpeg\n self.run(ff)\n\n # Get result mediainfo\n dest_media_info = self.get_media_info(self.destination)\n\n # Validate ffmpeg result\n self.validate(source_media_info, dest_media_info)", "def _decode(item):\n tivo_filename = item.filename()\n logger.info(\"Decoding %s\" % tivo_filename)\n\n mpeg_filename = item.filename(ext=\"mpg\")\n videos_dir = item.vdir()\n\n p = subprocess.Popen([\"/usr/local/bin/tivodecode\", \"--mak\", os.environ[\"MAK\"], \n \"--out\", mpeg_filename, tivo_filename], cwd=videos_dir,\n stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n rc = p.wait()\n\n logger.info(\"tivodecode returned %d\" % rc)\n logger.info(\"tivodecode output: '%s'\" % p.stdout.read())\n if rc == 0:\n # success!\n item.decoded = True\n item.save()\n else:\n raise Exception(\"Tivodecode failed on file '%s' with rc %d\" %\n (tivo_filename, rc))", "def video_audio_files(video_name, path_curr_vid, path_current_frames):\n video_file = f\"{path_curr_vid}{video_name}\"\n audio_file = f\"{path_current_frames}{video_name.replace(VIDEO_EXT,AUDIO_EXT)}\"\n return video_file, audio_file", "def make_video(self, mp4=True, gif=True):\n fn = self.get_output_filename(\".mp4\")\n command = (\n (get_ffmpeg_path() + f\" -loglevel panic -framerate {self.framerate} -i \")\n + os.path.join(self.frame_directory, FRAME_FN_TEMPLATE)\n + \" -s:v \"\n + str(self.width)\n + \"x\"\n + str(self.height)\n + \" -c:v libx264 -profile:v high -crf 1 -pix_fmt yuv420p -y \"\n + fn\n )\n\n os.system(command)\n\n if gif:\n mp4_to_gif(\n self.get_output_filename(\".mp4\"),\n self.get_output_filename(\".gif\"),\n self.framerate,\n )\n\n if not mp4:\n os.remove(fn)", "def video_codec(self, video_codec):\n # type: (string_types) -> None\n\n if video_codec is not None:\n if not isinstance(video_codec, string_types):\n raise TypeError(\"Invalid type for `video_codec`, type has to be `string_types`\")\n\n self._video_codec = video_codec", "def main(ctx, ttyrec, encoding, ibm, outfile, size, fps, font_size, font_file,\n bold_font_file, info, info_all):\n if ibm:\n encoding = 'cp437'\n fp, def_outfile = open_or_get(ttyrec)\n try:\n with fp:\n updates = list(read_ttyrec(fp, encoding=encoding, errors='replace'))\n except ShortTTYRecError as e:\n ctx.fail(str(e))\n if info or info_all:\n about = ttyrec_info(updates, show_all=info_all)\n click.echo(json.dumps(about, sort_keys=True, indent=4))\n 
return\n if len(updates) < 2:\n ctx.fail(\n 'ttyrec only has {} update{}; need at least two to make a video'\n .format(len(updates), 's' if len(updates) != 1 else '')\n )\n duration = updates[-1].timestamp - updates[0].timestamp\n click.echo(\n f'ttyrec length: {duration} ({len(updates)} distinct frames)',\n err=True,\n )\n imgr = ScreenRenderer(\n font = ImageFont.truetype(font_file, size=font_size),\n bold_font = ImageFont.truetype(bold_font_file, size=font_size),\n font_size = font_size,\n columns = size[0],\n lines = size[1],\n )\n imageio.plugins.ffmpeg.download()\n if outfile is None:\n outfile = def_outfile\n click.echo(f'Writing {outfile} ...', err=True)\n with click.progressbar(\n imgr.render_updates(updates, fps, block_size=MACRO_BLOCK_SIZE),\n length=ceil(duration.total_seconds() * fps),\n ) as mov_frames:\n imageio.mimwrite(outfile, map(np.asarray, mov_frames), fps=fps)", "def cmdLine():\n parser = argparse.ArgumentParser(description=\"Encode decode any file\"\n ,prog='codec') #usage='codec -e'\n parser.add_argument('-e','--encoder', help='Encoder number', type=int)\n parser.add_argument('-d', '--decoder', help='Decoder numer', type=int)\n parser.add_argument('-i', '--input', help='Input file name to encode/decode', type=str)\n parser.add_argument('-o', '--output', help='Output file name', type=str)\n parser.add_argument('-l', '--list', help='list of all encode/decoder'\n ,action='store_true')\n parser.add_argument('-p', '--passphrase', help='Pass phrase to encode file', type=str)\n parser.add_argument('-t', '--time', help='Validity time of encoded file in seconds', type=int, default=0)\n return parser.parse_args()", "def videoFrames(filename, framerate=1):\n vid_file = os.path.join(os.path.dirname(os.getcwd()), \"Database\", \"Video\", filename)\n print(vid_file)\n assert os.path.isfile(vid_file), \"Given path is not a valid file\"\n tmpdir = os.path.join(os.getcwd(), \"tmp\")\n subprocess.run(\n [\n \"ffmpeg\",\n \"-i\",\n vid_file,\n \"-r\",\n f\"{framerate}\",\n os.path.join(tmpdir, \"img_%04d.jpg\"),\n ]\n )\n return [os.path.join(tmpdir, i) for i in os.listdir(tmpdir) if not i.endswith(\".wav\")]", "def convert_to_m4a(self,path, filename):\n codec = \"aac\"\n m4a_filename = filename + \".m4a\"\n command = [self.FFMPEG_BIN,\n \"-n\",\n \"-i\", path,\n \"-acodec\", codec,\n \"-ab\", \"128k\",\n m4a_filename\n ]\n\n return command", "def process_videos(chapter_info):\n\n print(\"Processing chapter_info:\", chapter_info)\n\n # getting creation time of the first chapter\n # TODO update when adding multiple directory proccessing\n os.chdir(DIR_VIDEO_FILES)\n print(\"1st chapter\", chapter_info[1][0])\n chap1_time = time.strftime(\n r\"%Y-%m-%d_%H-%M\", time.localtime(os.path.getctime(chapter_info[1][0])))\n print(\"1st chapter creation\", chap1_time)\n\n # output_file = f\"M_GH00{chapter_info[0]}_{chap1_time}.MP4\"\n output_file = f\"{chap1_time}_GH00{chapter_info[0]}_MRG.MP4\"\n if os.path.isfile(output_file):\n print(f\"Chapter already processed, found file: {output_file}\")\n return\n\n # preparing text file containing file list for merging (for ffmpeg)\n video_list_file = chapter_info[0] + \"_merge.txt\"\n with open(video_list_file, \"w\") as f:\n for video_chapter in chapter_info[1]:\n f.write(f\"file {video_chapter}\\n\")\n\n command = f\"{FFMPEG_EXE} -f concat -i {video_list_file} -c copy {DIR_OUTPUT}{output_file}\"\n print(\"command =\", command)\n # p = subprocess.run(\"dir\", shell=True, capture_output=True)\n # p = subprocess.run(\"dir\", shell=True, 
stdout=subprocess.PIPE, text=True)\n p = subprocess.run(command, stdout=subprocess.PIPE, text=True)\n print(\"returncode =\", p.returncode)\n # print(\"stdout =\", p.stdout)\n os.remove(video_list_file) # remove file list after merging\n # rename original chapters after processing\n for video_chapter in chapter_info[1]:\n os.rename(video_chapter, f\"OK_{video_chapter}\")", "def encode(audio, video, output):\n check_call([\"mencoder\", \"-audiofile\", audio, \"-oac\", \"lavc\", \"-ovc\",\n \"lavc\", video, \"-o\", output], stdin=PIPE, stdout=PIPE, stderr=STDOUT)", "def mp3_to_wav(mp3_file, wav_file, encoder='mpg123'):\n if encoder == 'mpg123':\n bash_command = ['mpg123', '-w', wav_file, '--mono', mp3_file]\n else:\n bash_command = ['ffmpeg', '-i', mp3_file, wav_file]\n subprocess.run(bash_command)", "def transcode(path, outpath):\n\n needs_transcode = determine_transcode(path)\n logger.info(f\"Transcoding {path} to {outpath}...\")\n\n cmd = [\n \"ffmpeg\", \"-y\",\n \"-i\", path,\n \"-an\",\n \"-metadata:s\", \"handler_name=tator\",\n \"-vcodec\", \"libx264\",\n \"-g\", \"25\",\n \"-preset\", \"fast\",\n \"-pix_fmt\", \"yuv420p\",\n \"-vf\", \"pad=ceil(iw/2)*2:ceil(ih/2)*2\",\n \"-movflags\",\n \"faststart+frag_keyframe+empty_moov+default_base_moof\",\n \"-tune\", \"fastdecode\",\n ]\n\n if needs_transcode[1]:\n #Resize to 720p\n cmd.extend([\"-vf\", \"scale=-2:720\"])\n\n cmd.append(outpath)\n logger.info('ffmpeg cmd = {}'.format(cmd))\n subprocess.run(cmd, check=True)\n logger.info(\"Transcoding finished!\")", "def compose_stack_cmd(\n event: SentryEvent,\n scale: int = 4,\n speed: int = 1,\n quality: int = 23,\n global_opts: List[str] = None,\n) -> List[str]:\n\n views = [\"front\", \"back\", \"left\", \"right\"]\n\n cmd = [\"ffmpeg\"]\n\n if global_opts:\n cmd.extend(global_opts)\n\n for v in views:\n try:\n cmd.extend([\"-i\", str(event.event_dir / event.get_view(v))])\n except IndexError:\n # hack for pre 10.0 sw with no back video\n cmd.extend([\"-i\", str(event.event_dir / event.get_view(\"front\"))])\n cmd.extend([\"-an\", \"-filter_complex\"])\n filter = []\n for i, v in enumerate(views):\n filter.extend([f\"[{i}:v]\", f\"scale=iw/{scale}:ih/{scale}\", f\"[{v}];\"])\n filter.extend([\"[front][back]\", \"hstack\", \"[long];\"])\n filter.extend([\"[right][left]\", \"hstack\", \"[lat];\"])\n filter.extend([\"[long][lat]\", \"vstack\", \"[all];\"])\n filter.extend([\"[all]\", f\"setpts={1/speed}*PTS\", \"[res]\"])\n filter_string = \"\".join(filter)\n cmd.append(f\"{filter_string}\")\n cmd.extend([\"-c:v\", \"libx264\", \"-crf\", f\"{quality}\"])\n output_path = event.output_path(scale, speed, quality)\n cmd.extend([\"-map\", \"[res]\", str(output_path)])\n return cmd", "def process_video(proc_state):\n entry = proc_state.entry\n workbench = proc_state.workbench\n video_config = mgg.global_config['media_type:mediagoblin.media_types.video']\n\n queued_filepath = entry.queued_media_file\n queued_filename = proc_state.get_queued_filename()\n name_builder = FilenameBuilder(queued_filename)\n\n medium_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}-640p.webm'))\n\n thumbnail_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}.thumbnail.jpg'))\n\n # Create a temporary file for the video destination (cleaned up with workbench)\n tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)\n with tmp_dst:\n # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square\n progress_callback = ProgressCallback(entry)\n\n dimensions = (\n 
mgg.global_config['media:medium']['max_width'],\n mgg.global_config['media:medium']['max_height'])\n\n # Extract metadata and keep a record of it\n metadata = transcoders.VideoTranscoder().discover(queued_filename)\n store_metadata(entry, metadata)\n\n # Figure out whether or not we need to transcode this video or\n # if we can skip it\n if skip_transcode(metadata):\n _log.debug('Skipping transcoding')\n\n dst_dimensions = metadata['videowidth'], metadata['videoheight']\n\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n did_transcode = False\n else:\n transcoder = transcoders.VideoTranscoder()\n\n transcoder.transcode(queued_filename, tmp_dst.name,\n vp8_quality=video_config['vp8_quality'],\n vp8_threads=video_config['vp8_threads'],\n vorbis_quality=video_config['vorbis_quality'],\n progress_callback=progress_callback,\n dimensions=dimensions)\n\n dst_dimensions = transcoder.dst_data.videowidth,\\\n transcoder.dst_data.videoheight\n\n # Push transcoded video to public storage\n _log.debug('Saving medium...')\n mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)\n _log.debug('Saved medium')\n\n entry.media_files['webm_640'] = medium_filepath\n\n did_transcode = True\n\n # Save the width and height of the transcoded video\n entry.media_data_init(\n width=dst_dimensions[0],\n height=dst_dimensions[1])\n\n # Temporary file for the video thumbnail (cleaned up with workbench)\n tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)\n\n with tmp_thumb:\n # Create a thumbnail.jpg that fits in a 180x180 square\n transcoders.VideoThumbnailerMarkII(\n queued_filename,\n tmp_thumb.name,\n 180)\n\n # Push the thumbnail to public storage\n _log.debug('Saving thumbnail...')\n mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)\n entry.media_files['thumb'] = thumbnail_filepath\n\n # save the original... 
but only if we did a transcoding\n # (if we skipped transcoding and just kept the original anyway as the main\n # media, then why would we save the original twice?)\n if video_config['keep_original'] and did_transcode:\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n # Remove queued media file from storage and database\n proc_state.delete_queue_file()", "def main(_):\n print('argument to expand', ARGS.video_in)\n print('argument expanded', glob.glob(ARGS.video_in))\n video_count = 0\n for video_filename in glob.glob(ARGS.video_in):\n print('start parsing', video_filename)\n data = skvideo.io.ffprobe(video_filename)['video']\n rate_str = six.ensure_str(data['@r_frame_rate']).split('/')\n rate = float(rate_str[0]) / float(rate_str[1])\n print('detected frame rate:', rate)\n\n print('load frames:')\n video = skvideo.io.vreader(video_filename)\n frame_count = 0\n file_count = 0\n for frame in video:\n if (frame_count > ARGS.offset) and \\\n ((frame_count-ARGS.offset)%ARGS.skip == 0) and \\\n (frame_count/rate >= ARGS.from_s) and \\\n (frame_count/rate <= ARGS.to_s or ARGS.to_s == -1):\n print(frame_count,)\n img = Image.fromarray(frame)\n if ARGS.crop:\n img = crop(img, ARGS.size)\n # save file\n file_number = file_count + video_count * ARGS.multiple + ARGS.start\n if ARGS.format_ext.lower() == 'jpg':\n file_out = os.path.join(ARGS.path_out,\n 'f{:07d}.jpg'.format(file_number))\n img.save(file_out, 'JPEG')\n elif ARGS.format_ext.lower() == 'png':\n file_out = os.path.join(ARGS.path_out,\n 'f{:07d}.png'.format(file_number))\n img.save(file_out, 'PNG')\n else:\n print('unrecognize format', ARGS.format_ext)\n sys.exit()\n file_count += 1\n frame_count += 1\n video_count += 1", "def main():\n destination = Path(argv[1])\n source_files = destination.glob(\"**/*.wma\")\n for file in source_files:\n new_name = file.name.rsplit(\".\", maxsplit=1)[0] + \".flac\"\n dest = str(file.parent / new_name)\n cmd = list(map(str, [\"avconv\", \"-i\", file, dest]))\n if platform == \"win32\":\n print(\"Running on windows... 
on Unix I'd run the following command:\")\n print(cmd)\n else:\n that = Popen(cmd)\n that.wait()", "def process_group(pattern, params):\n # check subdirectory according to filter options\n subdir = params['label']\n # and the parameters label\n print 'Processing:', subdir\n\n # search for videos matching the pattern\n search = os.path.join(ROOT_RAWDATA_DIR, pattern)\n print 'Search pattern:', search\n flist = sorted(glob.glob(search))\n\n # for each matching video\n for f in flist:\n # video structures (copied from LEGOS FTP) is yyyymmdd/HH/MM.mp4\n # and we want to store frames as yyyymmdd/yyyymmdd_HH/yyyymmdd_HHMM/yyyymmdd_HHMM_<index>.<format>\n # so: recursively split to extract basename, hour and date\n p, fname = os.path.split(f)\n p, hour = os.path.split(p)\n p, date = os.path.split(p)\n minute, _ = os.path.splitext(fname)\n # compute output dir, and prefix for frames\n outdir = os.path.join(ROOT_PREPROC_DIR,\n subdir, # according to parameters\n date,\n '{}_{}'.format(date, hour),\n '{}_{}{}'.format(date, hour, minute),\n )\n prefix = '{}_{}{}_'.format(date, hour, minute)\n # create output directory if neeeded\n if not os.path.exists(outdir):\n print 'Creating output directory', outdir\n os.makedirs(outdir, 0755)\n # call decoder\n command = ['python', '-u', 'decoder.py',\n f,\n '-o', outdir,\n '-p', prefix,\n '-l', params['label'],\n '-f', str(params['image_format']),\n '-m', str(params['median_length']),\n '-r', str(params['resolution']),\n '-O', str(params['origin'][0]), str(params['origin'][1]),\n '-d', str(params['dimensions'][0]), str(params['dimensions'][1]),\n '-a', str(params['rotation']),\n ]\n subprocess.call(command)", "def split_and_upload(filepath,\n # upload parameters\n project_name=None, project_id=None, dataset_name=None, dataset_id=None, remote_path=None,\n # split parameters\n split_seconds=None, split_chunks=None, split_pairs=None,\n loglevel='panic'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. 
import manually and fix errors')\n raise\n # https://www.ffmpeg.org/ffmpeg-formats.html#Examples-9\n\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n logger.info('Extracting video information...')\n # call to ffmpeg to get frame rate\n probe = Videos.get_info(filepath)\n fps = eval(probe['streams'][0]['avg_frame_rate'])\n n_frames = eval(probe['streams'][0]['nb_frames'])\n video_length = eval(probe['streams'][0]['duration'])\n logger.info('Video frame rate: {}[fps]'.format(fps))\n logger.info('Video number of frames: {}'.format(n_frames))\n logger.info('Video length in seconds: {}[s]'.format(video_length))\n\n # check split params and calc split params for ffmpeg\n if split_seconds is not None:\n # split by seconds\n split_length = split_seconds\n if split_length <= 0:\n raise ValueError('\"split_length\" can\\'t be 0')\n split_count = int(np.ceil(video_length / split_length))\n list_frames_to_split = [fps * split_length * n for n in range(1, split_count)]\n elif split_chunks is not None:\n # split for known number of chunks\n split_count = split_chunks\n if split_chunks <= 0:\n raise ValueError('\"split_chunks\" size can\\'t be 0')\n split_length = int(np.ceil(video_length / split_chunks))\n list_frames_to_split = [fps * split_length * n for n in range(1, split_count)]\n elif split_pairs is not None:\n if not isinstance(split_pairs, list):\n raise ValueError('\"split_times\" must be a list of tuples to split at.')\n if not (isinstance(split_pairs[0], list) or isinstance(split_pairs[0], tuple)):\n raise ValueError('\"split_times\" must be a list of tuples to split at.')\n list_frames_to_split = [fps * split_second for segment in split_pairs for split_second in segment]\n split_count = len(list_frames_to_split)\n else:\n raise ValueError('Must input one split option (\"split_chunks\", \"split_time\" or \"split_pairs\")')\n if split_count == 1:\n raise ValueError('Video length is less than the target split length.')\n # to integers\n list_frames_to_split = [int(i) for i in list_frames_to_split]\n # remove 0 if in the first segmetn\n if list_frames_to_split[0] == 0:\n list_frames_to_split.pop(0)\n # add last frames if not exists\n if list_frames_to_split[-1] != n_frames:\n list_frames_to_split = list_frames_to_split + [n_frames]\n logger.info('Splitting to %d chunks' % split_count)\n\n basename, ext = os.path.splitext(filepath)\n output_regex = os.path.join(basename, '%%03d.mp4')\n # create folder\n if not os.path.exists(basename):\n os.makedirs(basename, exist_ok=True)\n # run ffmpeg\n try:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_regex,\n **{'x264opts': 'bframes=0',\n 'f': 'segment',\n 'reset_timestamps': '1',\n 'map': '0',\n 'segment_frames': ','.join(\n [str(i) for i in\n list_frames_to_split])\n })\n ffmpeg.overwrite_output(stream).run(capture_stdout=True)\n except Exception:\n logger.exception('ffmpeg error in disassemble:')\n raise\n\n # split_cmd = 'ffmpeg -y -i \"%s\" -b 0 -f mp4 -reset_timestamps 1 -map 0 -f segment -segment_frames %s \"%s\"' % (\n # filepath, ','.join([str(int(i)) for i in list_frames_to_split]), output_regex)\n # logger.info('About to run: %s' % split_cmd)\n # subprocess.check_call(shlex.split(split_cmd), universal_newlines=True)\n\n # rename\n list_frames_to_split = [0] + list_frames_to_split\n filenames = list()\n for n in range(split_count):\n old_filename = output_regex.replace('%03d', '%03d' % n)\n new_filename = output_regex.replace('%03d', '%s__%s' %\n (time.strftime('%H_%M_%S', 
time.gmtime(list_frames_to_split[n] / fps)),\n time.strftime('%H_%M_%S',\n time.gmtime(list_frames_to_split[n + 1] / fps))))\n filenames.append(new_filename)\n # rename to informative name\n if os.path.isfile(new_filename):\n logger.warning('File already exists. Overwriting!: {}'.format(new_filename))\n os.remove(new_filename)\n os.rename(old_filename, new_filename)\n # check if in pairs, if not - delete\n if split_pairs is not None:\n start_frames = [pair[0] for pair in split_pairs]\n end_frames = [pair[1] for pair in split_pairs]\n if (list_frames_to_split[n] // fps) in start_frames and (\n list_frames_to_split[n + 1] // fps) in end_frames:\n # keep video\n pass\n else:\n os.remove(new_filename)\n Videos.upload_to_platform(project_name=project_name,\n project_id=project_id,\n dataset_name=dataset_name,\n dataset_id=dataset_id,\n remote_path=remote_path,\n local_path=basename)", "def video_encoding(self):\n self.output_file = outputs_filenames(self.input, self.output_file)\n\n if self.resume and (self.temp / 'done.json').exists():\n set_logging(self.logging, self.temp)\n else:\n setup(self.temp, self.resume)\n set_logging(self.logging, self.temp)\n print(self.queue)\n framenums = split_routine(self.input, self.scenes, self.split_method, self.temp, self.min_scene_len, self.queue, self.threshold)\n\n if self.extra_split:\n framenums = extra_splits(input, framenums, self.extra_split)\n\n segment(self.input, self.temp, framenums)\n extract_audio(input, self.temp, self.audio_params)\n\n chunk = get_video_queue(self.temp, self.resume)\n\n # Make encode queue\n commands, self.video_params = compose_encoding_queue(chunk, self.temp, self.encoder, self.video_params, self.ffmpeg_pipe, self.passes)\n log(f'Encoding Queue Composed\\n'\n f'Encoder: {self.encoder.upper()} Queue Size: {len(commands)} Passes: {self.passes}\\n'\n f'Params: {self.video_params}\\n\\n')\n\n self.workers = determine_resources(self.encoder, self.workers)\n\n self.encoding_loop(commands)\n\n try:\n concatenate_video(self.temp, self.output_file, keep=self.keep)\n\n except Exception as e:\n _, _, exc_tb = sys.exc_info()\n print(f'Concatenation failed, FFmpeg error\\nAt line: {exc_tb.tb_lineno}\\nError:{str(e)}')\n log(f'Concatenation failed, aborting, error: {e}\\n')\n terminate()\n\n if self.vmaf:\n plot_vmaf(self.input, self.output_file, model=self.vmaf_path)", "def iblrig_video_compression(session_path, command):\n output_files = list(session_path.joinpath(\"raw_video_data\").rglob('_iblrig_*.mp4'))\n rig_avi_files = list(session_path.joinpath(\"raw_video_data\").rglob('_iblrig_*.avi'))\n # first compress everything (the rationale is not to delete anything if there is a crash)\n for file_in in rig_avi_files:\n _logger.info(f\" compressing {file_in}\")\n file_out = file_in.with_suffix('.mp4')\n status, fout = compress(file_in=file_in, file_out=file_out,\n command=command, remove_original=False)\n output_files.append(fout)\n # then remove everything\n for file_in in rig_avi_files:\n file_in.unlink()\n return output_files", "def __init__(self, input_file_path, convert_to_bgr=False):\n self.__yuv_video = YuvDecoder(input_file_path, convert_to_bgr=True)\n print('After INSTANTIATION')\n self.__yuv_video.start()", "def convert(processed_dir: str, video_file: str):\n\n video_name = osp.splitext(osp.basename(video_file))[0]\n out_dir = processed_dir + video_name\n\n # create img dir\n if not osp.exists(processed_dir):\n os.mkdir(processed_dir)\n\n # Create dir for video file if not existent\n # this is where we save our images\n if not 
osp.exists(out_dir):\n os.mkdir(out_dir)\n\n if osp.exists(out_dir):\n os.mkdir(out_dir + \"/kermit/\")\n os.mkdir(out_dir + \"/not_kermit/\")\n\n # open video file for processing\n cap = cv.VideoCapture(video_file)\n frame_rate = cap.get(5) # frame rate\n\n sec = 0\n total_count = (60*25)+50 # just an approximation\n pbar = tqdm.tqdm(total=total_count, leave=False)\n\n count = 0\n while (cap.isOpened()):\n frame_id = cap.get(1) # current frame number\n frame_exists, curr_frame = cap.read()\n\n if not frame_exists:\n break\n else:\n if (frame_id % math.floor(frame_rate) == 0):\n # output is : video_file/<video_file>_frameNr.jpg\n cv.imwrite(osp.join(out_dir, '{}_{}.jpg'.format(video_name,count)), curr_frame)\n count = count + 1\n pbar.update(1)\n\n pbar.close()\n # release resources\n cap.release()", "def init_transcode():\n if not os.path.exists(g.TCFILE):\n config_file_contents = \"\"\"\\\n# transcoding presets for mps-youtube\n# VERSION 0\n\n# change ENCODER_PATH to the path of ffmpeg / avconv or leave it as auto\n# to let mps-youtube attempt to find ffmpeg or avconv\nENCODER_PATH: auto\n\n# Delete original file after encoding it\n# Set to False to keep the original downloaded file\nDELETE_ORIGINAL: True\n\n# ENCODING PRESETS\n\n# Encode ogg or m4a to mp3 256k\nname: MP3 256k\nextension: mp3\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a libmp3lame -b:a 256k OUT.EXT\n\n# Encode ogg or m4a to mp3 192k\nname: MP3 192k\nextension: mp3\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a libmp3lame -b:a 192k OUT.EXT\n\n# Encode ogg or m4a to mp3 highest quality vbr\nname: MP3 VBR best\nextension: mp3\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a libmp3lame -q:a 0 OUT.EXT\n\n# Encode ogg or m4a to mp3 high quality vbr\nname: MP3 VBR good\nextension: mp3\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a libmp3lame -q:a 2 OUT.EXT\n\n# Encode m4a to ogg\nname: OGG 256k\nextension: ogg\nvalid for: m4a\ncommand: ENCODER_PATH -i IN -codec:a libvorbis -b:a 256k OUT.EXT\n\n# Encode ogg to m4a\nname: M4A 256k\nextension: m4a\nvalid for: ogg\ncommand: ENCODER_PATH -i IN -strict experimental -codec:a aac -b:a 256k OUT.EXT\n\n# Encode ogg or m4a to wma v2\nname: Windows Media Audio v2\nextension: wma\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a wmav2 -q:a 0 OUT.EXT\"\"\"\n\n with open(g.TCFILE, \"w\") as tcf:\n tcf.write(config_file_contents)\n dbg(\"generated transcoding config file\")\n\n else:\n dbg(\"transcoding config file exists\")\n\n with open(g.TCFILE, \"r\") as tcf:\n g.encoders = [dict(name=\"None\", ext=\"COPY\", valid=\"*\")]\n e = {}\n\n for line in tcf.readlines():\n\n if line.startswith(\"TRANSCODER_PATH:\"):\n m = re.match(\"TRANSCODER_PATH:(.*)\", line).group(1)\n g.transcoder_path = m.strip()\n\n elif line.startswith(\"DELETE_ORIGINAL:\"):\n m = re.match(\"DELETE_ORIGINAL:(.*)\", line).group(1)\n do = m.strip().lower() in (\"true\", \"yes\", \"enabled\", \"on\")\n g.delete_orig = do\n\n elif line.startswith(\"name:\"):\n e['name'] = re.match(\"name:(.*)\", line).group(1).strip()\n\n elif line.startswith(\"extension:\"):\n e['ext'] = re.match(\"extension:(.*)\", line).group(1).strip()\n\n elif line.startswith(\"valid for:\"):\n e['valid'] = re.match(\"valid for:(.*)\", line).group(1).strip()\n\n elif line.startswith(\"command:\"):\n e['command'] = re.match(\"command:(.*)\", line).group(1).strip()\n\n if \"name\" in e and \"ext\" in e and \"valid\" in e:\n g.encoders.append(e)\n e = {}", "def clip_video(vid_name, start_pos=0, duration=20, 
i=0, output_dir='./clips'):\n\n video = VideoFileClip(vid_name).subclip(start_pos, start_pos + duration)\n video.write_videofile(os.path.join(output_dir, str(i) + '.mkv'),\n codec='libx264',\n verbose=None)", "def video_files():\n p = parse_cmdline(get_parser=get_parser_files)\n log.setup_main_handler(\n mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.INFO)\n vis.show_video_abi_glm(\n files=p.files,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir)\n print(\"Files written to:\", p.outdir)", "async def video(self, ctx, *, arg: str):\n await ctx.send(site + self.extraire(search + self.traduire(arg.split(' ')), watch_))", "def ffmpeg_subclip_video_file(filename, t1, t2):\n subprocess.call(['ffmpeg', '-i', filename, '-ss', str(t1), '-to', str(t2), '-c', 'copy', '-y', filename.split('.')[0] + '_subclip.mp4'])\n return", "def startVideo(self,fname):\n\n\n try:\n fourcc = cv2.cv.CV_FOURCC(*'DIVX')\n\n except Exception as e:\n #print \"Exception \",e.args\n fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n\n self.video = cv2.VideoWriter(fname, fourcc, 10, (self.screenWidth, self.screenHeight))\n if self.video is None:\n print \"VideoWriter failed to start.\"\n else:\n print \"VideoWriter started ok\"", "def run():\n while True:\n try:\n active = pacvert.thequeue.getActive()\n current = pacvert.thequeue.getPending()\n if (active == None) and (current != None):\n pacvert.thequeue.addActive(current)\n active = current\n\n try:\n # setting up codec specific settings\n video = {'codec': pacvert.CONFIG.DEFAULT_CODEC_VIDEO} # set the targets codec\n if pacvert.CONFIG.DEFAULT_CODEC_VIDEO_CROP: # check if cropping is enabled\n video['width'] = active.crop[0] # set width\n video['height'] = active.crop[1] # set height\n video['mode'] = 'crop' # set crop mode\n\n if pacvert.CONFIG.DEFAULT_CODEC_VIDEO == \"h264\": # if target codec is h264\n video['preset'] = pacvert.CONFIG.CODEC_AVC_PRESET # set preset\n video['profile'] = pacvert.CONFIG.CODEC_AVC_PROFILE # set profile\n video['quality'] = pacvert.CONFIG.CODEC_AVC_QUALITY # set quality\n video['tune'] = pacvert.CONFIG.CODEC_AVC_TUNE # set tune\n if pacvert.CONFIG.CODEC_AVC_AUTOMAXRATE: # if automatic maxrate is enabled\n if pacvert.CONFIG.CODEC_AVC_BUFSIZE < 0 or pacvert.CONFIG.CODEC_H264_MAXRATE < 0:\n if 'bit_rate' in active.mediainfo['Video']:\n video['maxrate'] = cast_to_int(active.mediainfo['Video']['bit_rate']) # set maxrate to video track bitrate\n video['bufsize'] = cast_to_int(active.mediainfo['Video']['bit_rate']*3) # set bufsize to three times the video bitrate\n else:\n video['maxrate'] = pacvert.CONFIG.CODEC_AVC_MAXRATE # set maxrate to given value\n video['bufsize'] = pacvert.CONFIG.CODEC_AVC_BUFSIZE # set bufsize to given value\n for anotheropt in pacvert.CONFIG.CODEC_AVC_ADDITIONALOPT: # if additional options are specified\n video[anotheropt] = pacvert.CONFIG.CODEC_AVC_ADDITIONALOPT[anotheropt] # add options to out encoding list\n elif pacvert.CONFIG.DEFAULT_CODEC_VIDEO == \"hevc\": # if target codec is hevc\n video['preset'] = pacvert.CONFIG.CODEC_HEVC_PRESET # set preset\n video['quality'] = pacvert.CONFIG.CODEC_HEVC_QUALITY # set quality\n video['tune'] = pacvert.CONFIG.CODEC_HEVC_TUNE # set tune\n if pacvert.CONFIG.CODEC_HEVC_AUTOMAXRATE: # set max rate\n if pacvert.CONFIG.CODEC_HEVC_BUFSIZE < 0 or pacvert.CONFIG.CODEC_HEVC_MAXRATE < 0:\n if 'bit_rate' in active.mediainfo['Video']:\n video['maxrate'] = 
cast_to_int(active.mediainfo['Video']['bit_rate']) # set maxrate to video track bitrate\n video['bufsize'] = cast_to_int(active.mediainfo['Video']['bit_rate']*3) # set bufsize to three times the video bitrate\n else:\n video['maxrate'] = pacvert.CONFIG.CODEC_HEVC_MAXRATE # set maxrate to given value\n video['bufsize'] = pacvert.CONFIG.CODEC_HEVC_BUFSIZE # set bufsize to given value\n for anotheropt in pacvert.CONFIG.CODEC_HEVC_ADDITIONALOPT: # if additional options are specified\n video[anotheropt] = pacvert.CONFIG.CODEC_HEVC_ADDITIONALOPT[anotheropt] # add options to out encoding list\n elif pacvert.CONFIG.DEFAULT_CODEC_VIDEO == \"vp8\": # if target codec is vp8\n video['quality'] = pacvert.CONFIG.CODEC_VP8_QUALITY # set video quality\n video['threads'] = pacvert.CONFIG.CODEC_VP8_THREADS # set no of real cores\n else:\n logger.error(\"Codec not yet implemented\")\n\n conv = c.convert(active.fullpath, active.outputfilename,\n {\n 'format': 'mkv',\n 'video': video,\n 'audio': {\n 'codec': pacvert.CONFIG.DEFAULT_CODEC_AUDIO,\n },\n 'subtitle': {\n 'codec': pacvert.CONFIG.DEFAULT_CODEC_SUBTITLE,\n },\n 'map': 0,\n })\n for timecode in conv:\n logger.debug(\"Converting (\"+str(timecode)+\")...\")\n active.progress = timecode\n logger.info(\"Finished File: '\"+active.fullpath+\"'\")\n active.finished = now()\n pacvert.thequeue.addFinished(pacvert.thequeue.getActive()) # set status to finished\n except FFMpegConvertError as e:\n logger.error(\"ffmpeg: \" +e.message + \" with command: \"+ e.cmd)\n\n pacvert.thequeue.addFailed(pacvert.thequeue.getActive()) # set status to failed\n time.sleep(1)\n except Exception as e:\n logger.error(e)", "def generate_video_metadata(absolute_paths):\n\n vids = []\n\n bad_fn = \"/share/pi/cleemess/file-conversion-pipeline/bad_mp4s.txt\"\n good_fn = \"/share/pi/cleemess/file-conversion-pipeline/good_mp4s.txt\"\n # if os.path.exists(bad_fn):\n # os.remove(bad_fn)\n\n if os.path.exists(bad_fn):\n with open(bad_fn) as f:\n bad_paths = set([line.strip() for line in f.readlines()])\n else:\n bad_paths = set()\n\n if os.path.exists(good_fn):\n with open(good_fn) as f:\n good_paths = set([line.strip() for line in f.readlines()])\n else:\n good_paths = set()\n \n with tqdm(list(absolute_paths)) as pbar:\n for absolute_path in pbar:\n if absolute_path in bad_paths or absolute_path in good_paths:\n continue\n\n cmd = \"ffprobe -v quiet -print_format json -show_streams %s\" % absolute_path\n try:\n subprocess.check_output(shlex.split(cmd)).decode(\"utf-8\")\n with open(good_fn, \"a\") as f:\n f.write(absolute_path + \"\\n\")\n good_paths.add(absolute_path)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n with open(bad_fn, \"a\") as f:\n f.write(absolute_path + \"\\n\")\n bad_paths.add(absolute_path)\n # print(e)\n # print(cmd)\n # raise\n\n pbar.set_description(f\"{len(good_paths)}, {len(bad_paths)}\")\n return vids", "def let_camera_update_parameters(path_to_images, name_image, video_source=\"/dev/video0\"):\n subprocess_cmd(\"ffmpeg -f video4linux2 -s 1280x720 -i {} -ss 00:00:02 -frames 1 ./{}/{} -loglevel error -nostats\".format(video_source, path_to_images, name_image))", "def generate_still_from_video(self,\n in_path: str\n ) -> Tuple[bytes, float, str]:\n out_filepath = f\"/tmp/{uuid4()}.jpg\"\n command = [\n \"ffmpeg\",\n \"-i\", in_path,\n \"-vframes\", \"1\",\n out_filepath\n ]\n\n process = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n process.wait()\n stderr = process.stderr.read().decode(\"utf-8\")\n\n # Parse start 
timecode\n timecode = self.parse_start_timecode_from_stderr(stderr)\n\n # Read new file back in and delete\n try:\n with open(out_filepath, \"rb\") as f:\n file_out_bytes = f.read()\n os.remove(out_filepath)\n except FileNotFoundError:\n raise TranscodeError(\"FFmpeg returned a non-zero code.\\n\" + stderr)\n\n return file_out_bytes, timecode, stderr", "def process_video(lane, fname, output):\n\tclip = VideoFileClip(fname)\n\toutput_name = output\n\toutput_clip = clip.fl_image(lane.pipeline)\n\toutput_clip.write_videofile(output_name, audio=False)\n\tprint ('Video processed successfully')", "def add_audio_to_video(audio_path: Union[str, Path],\n video_path: Union[str, Path],\n out_video_path: [str, Path]) -> Path:\n command = 'ffmpeg -loglevel warning -y -i \"{}\" -i \"{}\" -c:v copy -c:a copy -shortest {}'.format(\n video_path.as_posix(),\n audio_path.as_posix(),\n out_video_path.as_posix(),\n )\n run_command(command)\n return out_video_path", "def testSetVideoFrame():\n\n\t# create output\n\toutputFileName = \"testSetVideoFrame.mov\"\n\touputFile = av.OutputFile( outputFileName )\n\n\t# create video frame and codec\n\timageDesc = av.VideoFrameDesc()\n\timageDesc.setWidth( 1920 )\n\timageDesc.setHeight( 1080 )\n\timageDesc.setDar( 1920, 1080 )\n\n\tinputPixel = av.Pixel()\n\tinputPixel.setColorComponents( av.eComponentRgb );\n\tinputPixel.setPlanar( False );\n\n\timageDesc.setPixel( inputPixel );\n\n\tinputVideoCodec = av.VideoCodec( av.eCodecTypeEncoder, \"mpeg2video\" );\n\tinputVideoCodec.setImageParameters( imageDesc );\n\n\t# create transcoder and add a video stream\n\ttranscoder = av.Transcoder( ouputFile )\n\ttranscoder.add( \"\", 0, \"xdcamhd422\", inputVideoCodec )\n\tvideoEssence = transcoder.getStreamTranscoder( 0 ).getCurrentEssence()\n\n\t# start process\n\ttranscoder.init()\n\touputFile.beginWrap()\n\n\t# process 255 frames\n\tfor i in range(0,255):\n\t\ttranscoder.processFrame()\n\t\t# set video frame\n\t\tframe = av.VideoFrame( imageDesc )\n\t\tframe.getBuffer().assign(frame.getBuffer().size(), i)\n\t\tvideoEssence.setFrame( frame )\n\n\t# end process\n\touputFile.endWrap()\n\n\t# get dst file of transcode\n\tdst_inputFile = av.InputFile( outputFileName )\n\tprogress = av.NoDisplayProgress()\n\tdst_inputFile.analyse( progress, av.InputFile.eAnalyseLevelFast )\n\tdst_properties = dst_inputFile.getProperties()\n\tdst_videoStream = dst_properties.videoStreams[0]\n\n\tassert_equals( \"mpeg2video\", dst_videoStream.codecName )\n\tassert_equals( \"MPEG-2 video\", dst_videoStream.codecLongName )\n\tassert_equals( 1920, dst_videoStream.width )\n\tassert_equals( 1080, dst_videoStream.height )\n\tassert_equals( 16, dst_videoStream.dar.num )\n\tassert_equals( 9, dst_videoStream.dar.den )", "def main(path):\n logger.info(f'Processing video file {path}')\n # Extract audio\n audio_file = extract_audio(path, pipeline_config.audio_target_dir)\n\n # Generate sound classification results and speech recogniser results\n sound_results = SoundRecogniser().process_file(audio_file)\n sound_results = process_overlap(sound_results)\n speech_results = SpeechRecogniser().process_file(audio_file)\n\n # NLP\n wrds = get_words(speech_results)\n nlp = SpaCyNaturalLanguageProcessor(pipeline_config.spacy_model)\n custom_nlp = SpaCyNaturalLanguageProcessor(pipeline_config.custom_spacy_model)\n processor = nlp.get_spacy_results_processor(wrds, speech_results)\n custom_processor = custom_nlp.get_spacy_results_processor(wrds, speech_results)\n chunk_results = processor.process_speech_results_chunk()\n 
ner_results = processor.process_speech_results_ner()\n ner_results.extend(custom_processor.process_speech_results_ner())\n match_results = processor.process_speech_results_match()\n speech_results = nlp.process_spurious_words(speech_results, chunk_results)\n\n # Add Speech recogniser results, sound classification results and NLP results to a subtitle file\n subs_1 = save_to_subtitles(speech_results,\n lambda speech_result: speech_result['word'])\n subs_1 = compress_subs(subs_1)\n subs_2 = save_to_subtitles(sound_results,\n lambda sound_result: sound_result['class'])\n subs_2 = flatten_subs(subs_2)\n subs_3 = save_to_subtitles(chunk_results,\n lambda chunk_result: f'{chunk_result[\"word\"]} ({chunk_result[\"head\"]})')\n subs_4 = save_to_subtitles(ner_results,\n lambda ner_result: f'{ner_result[\"type\"]} {ner_result[\"word\"]}')\n subs_5 = save_to_subtitles(match_results,\n lambda match_result: match_result[\"word\"])\n\n combined_subs = append_subs(None, subs_1, style='bottom')\n combined_subs = append_subs(combined_subs, subs_2, exclude=['bottom'], style='top', formatter=lambda x: f'({x})')\n combined_subs = append_subs(combined_subs, subs_3, style='left')\n combined_subs = append_subs(combined_subs, subs_4, style='right')\n combined_subs = append_subs(combined_subs, subs_5, style='bottom_left_pred')\n combined_subs = remove_tiny_subs(combined_subs, duration_millis=1000, left_millis=None,\n right_millis=None, style='top')\n subtitle_file_name = os.path.splitext(path)[0] + '.ass'\n create_styles(combined_subs)\n combined_subs.save(subtitle_file_name)\n\n # Burn to a video\n burn_subtitles_into_video(path, subtitle_file_name, pipeline_config.audio_target_dir)\n logger.info(f'Done processing {audio_file}')", "def make_mp4(fname: str, stills_dir: str, fps: float, timeit: bool = False) -> None:\n\n if timeit:\n from time import time\n\n t_start = time()\n\n os.chdir(stills_dir)\n\n settings = {\n \"util\": \"ffmpeg\",\n \"infile\": \"-i img%06d.png\",\n \"video-codec\": \"-vcodec libx264\",\n \"bitrate\": \"-b:v 6000k\",\n \"frame_rate\": f\"-r {fps} -framerate {fps}\",\n \"pixel-format\": \"-pix_fmt yuv420p\",\n \"outfile\": fname,\n }\n\n log.info(\"Running ffmpeg's stills-to-mp4 conversion...\")\n\n # ffmpeg -i img%06d.png -vcodec libx264 -b:v 6000k\n # -r 24 -framerate 24 -pix_fmt yuv420p movie.mp4\n cmd = \" \".join(settings.values())\n log.debug(cmd)\n os.system(cmd)\n\n if timeit:\n # noinspection PyUnboundLocalVariable\n log.info(\"ffmpeg took %ss to run\", round(time() - t_start, 2))", "def parse_args():\n parser = argparse.ArgumentParser(description = 'Transform Video Right format')\n parser.add_argument(\n \"--input_video_file\", help=\"Path to video file\",\n default=None, required=True)\n parser.add_argument(\n \"--output_video_file\", help=\"Path to the right format video file '.mp4'\",\n default=None, required=True)\n return parser.parse_args()", "def create_movie(name, folder):\n cmd = [\"ffmpeg\", \"-framerate\", \"1\", \"-i\", folder + \"/pic%04d.png\", \"-c:v\",\n \"libx264\", \"-r\", \"30\", \"-pix_fmt\", \"yuv420p\", name]\n return subprocess.call(cmd)", "def seqIo_frImgs(fName, header=[], aviName=[], Is=[], sDir=[], name='I', ndig=5, f0=0, f1=1e6):\n \n if aviName!=[]: #avi movie exists\n vc = cv2.VideoCapture(aviName)\n if vc.isOpened(): rval = True\n else:\n rval = False\n print('video not readable')\n return\n fps = vc.get(cv2.cv.CV_CAP_PROP_FPS)\n NUM_FRAMES = int(vc.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))\n print(NUM_FRAMES)\n IM_TOP_H = 
vc.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)\n IM_TOP_W = vc.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)\n header['width']=IM_TOP_W\n header['height']=IM_TOP_H\n header['fps']=fps\n\n sw = seqIo_writer(fName,header)\n print('creating seq from AVI')\n # initialize timer\n timer = pb.ProgressBar(widgets=['Converting ', pb.Percentage(), ' -- ',\n pb.FormatLabel('Frame %(value)d'), '/',\n pb.FormatLabel('%(max)d'), ' [', pb.Timer(), '] ',\n pb.Bar(), ' (', pb.ETA(), ') '], maxval=NUM_FRAMES)\n for f in range(NUM_FRAMES):\n rval, im = vc.read()\n if rval:\n im= im.astype(np.uint8)\n sw.addFrame(im)\n timer.update(f)\n sw.close()\n timer.finish()\n elif Is==[]:\n assert(os.path.isdir(sDir))\n sw = seqIo_writer(fName,header)\n frmstr = '%s/%s%%0%ii.%s' % (sDir,name,ndig,header.ext)\n for frame in range(f0,f1):\n f = frmstr % frame\n if not os.path.isfile(f):break\n fid = open(f, 'r')\n if fid<0: sw.close(); assert(False)\n I = fid.read()\n fid.close()\n b = bytearray(I)\n assert (b[0] == 255 and b[1] == 216 and b[-2] == 255 and b[-1] == 217); # JPG\n I = np.array(list(b)).astype(np.uint8)\n sw.addFrame(I,0,0)\n sw.close()\n if frame==f0: print('No images found')\n else:\n nd = len(Is.shape)\n if nd==2: nd=3\n assert(nd<=4)\n nFrm = Is.shape[nd-1]\n header['height']=Is.shape[0]\n header['width']=Is.shape[1]\n sw =seqIo_writer(fName,header)\n if nd==3:\n for f in range(nFrm): sw.addFrame(Is[:,:,f])\n if nd==4:\n for f in range(nFrm): sw.addFrame(Is[:,:,:,f])\n sw.close()", "def encode (self, frames, fps, destinationPath = None, preset = None):\n # generate a file name hash by source frames names fps and preset.\n hc = hash (\"video\", \"h264\", \"mp4\", fps, preset)\n for frame in frames:\n hc = hash (hc, str (pathlib.Path (frame).resolve ()))\n\n # check if file is already in cache\n cachePath = pathlib.Path (cache.persistentPath (hc, self.extension ())).resolve ()\n if cachePath.exists ():\n # return cached file or create copy\n if destinationPath == None:\n return str (cachePath)\n else:\n try:\n copyfile (cachePath, pathlib.Path (destinationPath))\n except:\n return None\n return str (destinationPath)\n\n # video doesn't exist, create it...\n\n # Encode via parent encoder (get avi file path)\n preEncoded = AviH264.encode (frames, fps, None, preset)\n\n # create temp working directory\n tempDir = cache.temporary ()\n os.makedirs (tempDir)\n\n # symlink video into temporary directory\n os.symlink (preEncoded, tempDir + os.path.sep + 'input.avi')\n\n # process inside temporary directory\n lastDir = os.path.abspath (os.curdir)\n os.chdir (tempDir)\n\n # TODO:\n silent = True\n\n # unpack h264 stream\n unpackCommand = [_MP4H264Encoder._getMP4BoxRunnable (), \"-aviraw\", \"video\", 'input.avi']\n result = subprocess.run (unpackCommand, capture_output=silent)\n if result.returncode != 0:\n if silent:\n print (result.stderr)\n print (result.stdout)\n try:\n os.chdir (lastDir)\n rmtree (tempDir)\n except:\n pass\n return None\n\n # temporary output file\n cacheFileTemp = \"output.mp4\"\n\n # pack mp4 file\n packCommand = [_MP4H264Encoder._getMP4BoxRunnable (), \"-add\", \"input_video.h264\", cacheFileTemp]\n result = subprocess.run (packCommand, capture_output=silent)\n if result.returncode != 0:\n if silent:\n print (result.stderr)\n print (result.stdout)\n try:\n os.chdir (lastDir)\n rmtree (tempDir)\n except:\n pass\n return None\n\n # copy to cache\n cacheFile = cache.persistentPath (hc, self.extension (), True)\n os.rename (cacheFileTemp, cacheFile)\n\n # leave & remove temporary directory\n try:\n os.chdir 
(lastDir)\n rmtree (tempDir)\n except:\n pass\n\n # need to copy to output file?\n if destinationPath == None:\n return str (cacheFile)\n else:\n try:\n copyfile (cacheFile, pathlib.Path (destinationPath))\n except:\n return None\n return str (destinationPath)", "def __call__(self, video_path, per_frames = 1 , offset = None):\n \n cap = cv2.VideoCapture(video_path)\n \n if not cap.isOpened():\n raise Exception(\"Video file does not exist or is invalid\")\n\n \n if offset:\n cap.set(cv2.CAP_PROP_POS_MSEC, offset)\n \n \n info = []\n\n while cap.isOpened():\n ret, frame = cap.read()\n if ret:\n if cap.get(cv2.CAP_PROP_POS_FRAMES) % per_frames == 0:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n faces_info = self.detect_faces_from_image(frame,\n desired_width=224, desired_height=224) \n if faces_info:\n for element in faces_info:\n face_img = image.img_to_array(element[1])\n\n face_img = utils.preprocess_input(face_img, version=1)\n face_img = np.expand_dims(face_img, axis=0)\n\n features = self.vgg_feature_extractor.predict(face_img)\n label = self.gender_svm.predict(features)[0]\n decision_value = round(self.gender_svm.decision_function(features)[0], 3)\n\n bounding_box = element[0][0]\n detection_score = round(element[5], 3)\n bbox_length = bounding_box.bottom() - bounding_box.top()\n\n info.append([\n cap.get(cv2.CAP_PROP_POS_FRAMES), bounding_box, (bbox_length, bbox_length), label,\n decision_value, detection_score\n ])\n\n else:\n break\n cap.release()\n info = pd.DataFrame.from_records(info, columns = ['frame', 'bb', 'size','label', 'decision', 'conf'])\n return info", "def _unroll_video(self, video: int) -> None:\n video_file = self.dataset_name + '_' + str(video).zfill(2) + '.mp4'\n\n # Create camera directory to store all frames\n camera = 'camera' + str(video).zfill(2)\n camera_dir = os.path.join(self.videos_dir, camera)\n os.mkdir(camera_dir)\n\n if self.image_format == 'jpeg':\n unroll = subprocess.run([\"ffmpeg\", \"-i\", os.path.join(self.dataset_dir, video_file), \"-qscale:v\", \"2\", \"-vf\", \"scale=1280:720\",\n os.path.join(camera_dir, self.frame_format + \".\" + self.image_format)])\n else:\n unroll = subprocess.run([\"ffmpeg\", \"-i\", os.path.join(self.dataset_dir, video_file), \"-vf\", \"scale=1280:720\",\n os.path.join(camera_dir, self.frame_format + \".\" + self.image_format)])\n # print(\"The exit code was: %d\" % unroll.returncode)", "def __init__(self, ffmpeg_path=None, ffprobe_path=None):\n\n def which(name):\n path = os.environ.get_parser('PATH', os.defpath)\n for d in path.split(':'):\n fpath = os.path.join(d, name)\n if os.path.exists(fpath) and os.access(fpath, os.X_OK):\n return fpath\n return None\n\n if ffmpeg_path is None:\n ffmpeg_path = 'ffmpeg'\n\n if ffprobe_path is None:\n ffprobe_path = 'ffprobe'\n\n if '/' not in ffmpeg_path:\n ffmpeg_path = which(ffmpeg_path) or ffmpeg_path\n if '/' not in ffprobe_path:\n ffprobe_path = which(ffprobe_path) or ffprobe_path\n\n self.ffmpeg_path = ffmpeg_path\n self.ffprobe_path = ffprobe_path\n\n if not os.path.exists(self.ffmpeg_path):\n raise FFMpegError(\"ffmpeg binary not found: \" + self.ffmpeg_path)\n\n if not os.path.exists(self.ffprobe_path):\n raise FFMpegError(\"ffprobe binary not found: \" + self.ffprobe_path)\n\n self.hwaccels = []\n\n self.encoders = []\n self.decoders = []\n\n self._getcapabilities()", "def generate_video(sign, issue, output):\n\n videos = {\n \"Climate Change\": \"ClimateChange.mp4\",\n \"Green Jobs\": \"GreenJobs.mp4\",\n \"Tourism\": \"Tourism.mp4\",\n \"Small Business\": 
\"SmallBusiness.mp4\",\n \"Public health\": \"PublicHealth.mp4\",\n \"Education Funding\": \"EducationFunding.mp4\"\n }\n\n video_path = CWD(f\"Assets/{videos[issue]}\")\n\n frame = cv2.imread(sign)\n frame = cv2.resize(frame, (1920, 1080))\n height, width, layers = frame.shape\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n video = cv2.VideoWriter(CWD(\"temp.mp4\"), fourcc, 1, (width, height))\n for i in range(5):\n video.write(frame)\n video.release()\n\n image_clip = VideoFileClip(CWD(\"temp.mp4\"))\n original_video = VideoFileClip(video_path)\n final_video = concatenate_videoclips([original_video, image_clip], method=\"compose\")\n\n final_video.write_videofile(output)\n os.remove(CWD(\"temp.mp4\"))", "def convert(self, infile, outfile, opts, timeout=10, preopts=None, postopts=None):\n if os.name == 'nt':\n timeout = 0\n\n if not os.path.exists(infile):\n raise FFMpegError(\"Input file doesn't exist: \" + infile)\n\n cmds = [self.ffmpeg_path]\n if preopts:\n cmds.extend(preopts)\n cmds.extend(['-i', infile])\n\n # Move additional inputs to the front of the line\n for ind, command in enumerate(opts):\n if command == '-i':\n cmds.extend(['-i', opts[ind + 1]])\n del opts[ind]\n del opts[ind]\n\n cmds.extend(opts)\n if postopts:\n cmds.extend(postopts)\n cmds.extend(['-y', outfile])\n\n if timeout:\n def on_sigalrm(*_):\n signal.signal(signal.SIGALRM, signal.SIG_DFL)\n raise Exception('timed out while waiting for ffmpeg')\n\n signal.signal(signal.SIGALRM, on_sigalrm)\n\n try:\n p = self._spawn(cmds)\n except OSError:\n raise FFMpegError('Error while calling ffmpeg binary')\n\n yielded = False\n buf = ''\n total_output = ''\n pat = re.compile(r'time=([0-9.:]+) ')\n\n while True:\n if timeout:\n signal.alarm(timeout)\n\n ret = p.stderr.read(10)\n\n if timeout:\n signal.alarm(0)\n\n if not ret:\n # For small or very fast jobs, ffmpeg may never output a '\\r'. 
When EOF is reached, yield if we haven't yet.\n if not yielded:\n yielded = True\n yield 10\n break\n\n try:\n ret = ret.decode(console_encoding)\n except UnicodeDecodeError:\n try:\n ret = ret.decode(console_encoding, errors=\"ignore\")\n except:\n pass\n\n total_output += ret\n buf += ret\n if '\\r' in buf:\n line, buf = buf.split('\\r', 1)\n\n tmp = pat.findall(line)\n if len(tmp) == 1:\n timespec = tmp[0]\n if ':' in timespec:\n timecode = 0\n for part in timespec.split(':'):\n timecode = 60 * timecode + float(part)\n else:\n timecode = float(tmp[0])\n yielded = True\n yield timecode\n\n if timeout:\n signal.signal(signal.SIGALRM, signal.SIG_DFL)\n\n p.communicate() # wait for process to exit\n\n if total_output == '':\n raise FFMpegError('Error while calling ffmpeg binary')\n\n cmd = ' '.join(cmds)\n if '\\n' in total_output:\n line = total_output.split('\\n')[-2]\n\n if line.startswith('Received signal'):\n # Received signal 15: terminating.\n raise FFMpegConvertError(line.split(':')[0], cmd, total_output, pid=p.pid)\n if line.startswith(infile + ': '):\n err = line[len(infile) + 2:]\n raise FFMpegConvertError('Encoding error', cmd, total_output,\n err, pid=p.pid)\n if line.startswith('Error while '):\n raise FFMpegConvertError('Encoding error', cmd, total_output,\n line, pid=p.pid)\n if not yielded:\n raise FFMpegConvertError('Unknown ffmpeg error', cmd,\n total_output, line, pid=p.pid)\n if p.returncode != 0:\n raise FFMpegConvertError('Exited with code %d' % p.returncode, cmd,\n total_output, pid=p.pid)\n\n return outfile", "def copy_audio_from_another_video(no_audio_video_path: Union[str, Path],\n with_audio_video_path: [str, Path],\n out_video_path: [str, Path]) -> Path:\n no_audio_video_path = Path(no_audio_video_path)\n with_audio_video_path = Path(with_audio_video_path)\n out_video_path = Path(out_video_path)\n\n command = 'ffmpeg -loglevel warning -y -i {} -i {} -c copy -map 0:0 -map 1:1 -shortest {}'.format(\n no_audio_video_path.as_posix(), with_audio_video_path.as_posix(),\n out_video_path.as_posix())\n run_command(command)\n return out_video_path", "def main():\n input_video = sys.argv[1]\n input_audio = sys.argv[2]\n output_video = sys.argv[3]\n set_audio(input_video, input_audio, output_video)", "def main():\n parser = argparse.ArgumentParser(description=\"Tracks adult fish\")\n # add options for argument parser\n parser.add_argument(\"in_path\",\n help=\"Path to the video directory.\")\n parser.add_argument(\"out_path\",\n help=\"Directory for results. 
Should be empty.\")\n parser.add_argument(\"-x\", \"--keep_temp\", action=\"store_true\",\n help=\"Keep temporary folder after execution.\")\n parser.add_argument(\"--visual\", action=\"store_true\",\n help=\"shows a visual representation of the tracking progress.\")\n\n # parse arguments from command line\n args = parser.parse_args()\n # get all file names and directories ready\n out_dir, temp_dir, video_bases, videos = housekeeping(args)\n borders = []\n for i in range(len(videos)):\n v = videos[i]\n get_borders(borders, temp_dir, v)\n\n for i in range(len(videos)):\n vbn = video_bases[i]\n v = videos[i]\n scaled_video = \"scaled_\" + vbn + \".avi\"\n ffmpeg = Ffmpeg(v, os.path.join(temp_dir, scaled_video))\n ffmpeg.f = \"avi\"\n ffmpeg.vcodec = \"libx264rgb\"\n ffmpeg.width = 480\n ffmpeg.run()\n\n for i in range(len(videos)):\n vbn = video_bases[i]\n pts = tracker(args, temp_dir, vbn)\n border = borders[i]\n tracks_lower, tracks_upper = split_tracks(border, pts)\n analysis = Analysis(tracks_lower, tracks_upper, px_size=0.06)\n analysis.analyze(os.path.join(out_dir, 'stats.txt'), vbn, vel=True)\n\n if not args.keep_temp:\n shutil.rmtree(temp_dir)", "def main():\n convert(\"env_100000.mp4\", TargetFormat.GIF)", "def transcode_segment(self,\n in_path: str,\n profile: TranscodeProfile\n ) -> Tuple[bytes, str, str]:\n out_filepath = f\"/tmp/{uuid4()}.ts\"\n transcode_command = [\n \"ffmpeg\",\n \"-i\", in_path,\n \"-vf\", f\"scale={profile.video_width}:-1\",\n *profile.get_video_transcode_parameters(),\n \"-bsf:v\", \"h264_mp4toannexb\",\n *profile.get_audio_transcode_parameters(),\n \"-copyts\", \"-muxdelay\", \"0\",\n \"-preset\", profile.video_preset,\n out_filepath\n ]\n\n process = subprocess.Popen(transcode_command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n process.wait()\n stderr = process.stderr.read().decode(\"utf-8\")\n\n # Read new file back in and delete\n try:\n with open(out_filepath, \"rb\") as f:\n file_out_bytes = f.read()\n os.remove(out_filepath)\n except FileNotFoundError:\n raise TranscodeError(\"FFmpeg returned a non-zero code.\\n\" + stderr)\n\n return file_out_bytes, stderr, transcode_command", "def convert_to_mp3(self,path, filename):\n\n codec = \"libmp3lame\"\n mp3_filename = filename + \".mp3\"\n\n command = [self.FFMPEG_BIN,\n \"-n\",\n \"-i\", path,\n \"-acodec\", codec,\n \"-ab\", \"128k\",\n mp3_filename\n ]\n\n return command", "def frame_dump(filename, frametime, output_filename='out.png', \n meth='ffmpeg fast', subseek_cushion=20., verbose=False, dry_run=False,\n very_verbose=False):\n \n if meth == 'mplayer':\n raise ValueError(\"mplayer not supported\")\n elif meth == 'ffmpeg best':\n # Break the seek into a coarse and a fine\n coarse = np.max([0, frametime - subseek_cushion])\n fine = frametime - coarse\n syscall = 'ffmpeg -y -ss %r -i %s -ss %r -vframes 1 %s' % (\n coarse, filename, fine, output_filename)\n elif meth == 'ffmpeg accurate':\n syscall = 'ffmpeg -y -i %s -ss %r -vframes 1 %s' % (\n filename, frametime, output_filename)\n elif meth == 'ffmpeg fast':\n syscall = 'ffmpeg -y -ss %r -i %s -vframes 1 %s' % (\n frametime, filename, output_filename)\n \n if verbose:\n print(syscall)\n if not dry_run:\n #os.system(syscall)\n syscall_l = syscall.split(' ')\n syscall_result = subprocess.check_output(syscall_l, \n stderr=subprocess.STDOUT)\n if very_verbose:\n print(syscall_result)", "def compress_video(x: np.ndarray, video_format: str, constant_rate_factor: int, dir_: str = \"\"):\n import ffmpeg\n\n video_path = os.path.join(dir_, 
f\"tmp_video.{video_format}\")\n _, height, width, _ = x.shape\n\n # numpy to local video file\n process = (\n ffmpeg.input(\"pipe:\", format=\"rawvideo\", pix_fmt=\"rgb24\", s=f\"{width}x{height}\")\n .output(video_path, pix_fmt=\"yuv420p\", vcodec=\"libx264\", crf=constant_rate_factor)\n .overwrite_output()\n .run_async(pipe_stdin=True, quiet=True)\n )\n process.stdin.write(x.flatten().astype(np.uint8).tobytes())\n process.stdin.close()\n process.wait()\n\n # local video file to numpy\n stdout, _ = (\n ffmpeg.input(video_path)\n .output(\"pipe:\", format=\"rawvideo\", pix_fmt=\"rgb24\")\n .run(capture_stdout=True, quiet=True)\n )\n return np.frombuffer(stdout, np.uint8).reshape(x.shape)", "def get_mpeg_info(videos_dir, filename):\n logger.info(\"Getting info from %s/%s\" % (videos_dir, filename))\n if not os.path.exists(videos_dir):\n raise Exception(\"%s dir does not exist!\" % videos_dir)\n path = os.path.join(videos_dir, filename)\n if not os.path.exists(path):\n raise Exception(\"%s does not exist!\" % path)\n\n p = subprocess.Popen([FFMPEG, \"-i\", filename], cwd=videos_dir,\n stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n rc = p.wait()\n\n out = p.stdout.read()\n pattern = r'Video: mpeg2video \\(Main\\), (?P<vdata>.*?)\\n'\n m = re.search(pattern, out)\n\n if not m:\n raise Exception(\"Failed to search mpeg info: '%s'\" % out)\n\n vdata = m.groups()[0]\n mdata = vdata.split(\", \")\n logger.info(mdata)\n\n resolution = mdata[1].split(\" \")[0]\n (width, height) = resolution.split(\"x\")\n width = int(width)\n height = int(height)\n logger.info(\"%dx%d\" % (width, height))\n\n bitrate = mdata[2].split(\" \")[0] # kb/s\n\n fps = float(mdata[3].split(\" \")[0])\n\n return {\n \"width\": width,\n \"height\": height,\n \"bitrate\": bitrate, # kb/s\n \"fps\": fps,\n }", "def ffmpeg_extract_frame(filename, t1, targetname):\n\n cmd = [get_setting(\"FFMPEG_BINARY\"),\n \"-i\", filename,\n \"-ss\", \"%0.2f\" % t1,\n \"-vframes\", \"1\", targetname]\n\n subprocess_call(cmd)", "def __merge_ts_by_ffmepg(self, local_m3u8_path, video_name):\n try:\n command = 'ffmpeg -allowed_extensions ALL -i {} -c copy -y {}'.format(local_m3u8_path, video_name)\n print(command)\n os.system(command)\n print('merge succeed.')\n except:\n print('merge failed.')", "def video_codec(self):\n # type: () -> string_types\n return self._video_codec", "def encode_h265(track_metadata, preset):\n\tprint(\"---- Encoding\", track_metadata.file_name, \"to H265...\")\n\tnew_file_name = track_metadata.file_name + \".265\"\n\tstats_file = track_metadata.file_name + \".stats\"\n\tvapoursynth_script = track_metadata.file_name + \".vpy\"\n\n\tx265_presets = {\n\t\t\"hdanime\": {\n\t\t\t\"preset\": \"8\",\n\t\t\t\"bitrate\": \"800\",\n\t\t\t\"deblock\": \"1:1\"\n\t\t},\n\t\t\"uhd\": {\n\t\t\t\"preset\": \"7\",\n\t\t\t\"bitrate\": \"3500\",\n\t\t\t\"deblock\": \"-2:0\"\n\t\t},\n\t\t\"dvd\": {\n\t\t\t\"preset\": \"8\",\n\t\t\t\"bitrate\": \"600\",\n\t\t\t\"deblock\": \"-2:0\"\n\t\t}\n\t}\n\n\t#The encoding process produces some side effects that may need cleaning up.\n\t#Some are normally cleaned up but if the encoding is interrupted, be sure to delete them anyway.\n\tsideeffect_files = [\n\t\ttrack_metadata.file_name + \".stats.cutree.temp\",\n\t\ttrack_metadata.file_name + \".stats.temp\",\n\t\ttrack_metadata.file_name + \".ffindex\",\n\t\ttrack_metadata.file_name + \".stats.cutree\",\n\t\ttrack_metadata.file_name + \".stats\"\n\t]\n\n\t#Generate VapourSynth script.\n\tif preset == \"dvd\":\n\t\tif 
track_metadata.interlaced:\n\t\t\tif track_metadata.interlace_field_order == \"tff\":\n\t\t\t\tvsscript = \"dvd_tff\"\n\t\t\telse:\n\t\t\t\tvsscript = \"dvd_bff\"\n\t\telse:\n\t\t\tvsscript = \"dvd_noninterlaced\"\n\telse:\n\t\tvsscript = preset\n\tscript_source = vsscript + \".vpy\"\n\ttry:\n\t\twith open(os.path.join(os.path.split(__file__)[0], script_source)) as f:\n\t\t\tscript = f.read()\n\t\tscript = script.format(input_file=track_metadata.file_name)\n\t\twith open(vapoursynth_script, \"w\") as f:\n\t\t\tf.write(script)\n\n\t\tvspipe_command = [\"vspipe\", \"--y4m\", vapoursynth_script, \"-\"]\n\t\tx265_command = [\n\t\t\t\"x265\",\n\t\t\t\"-\",\n\t\t\t\"--y4m\",\n\t\t\t\"--fps\", str(track_metadata.fps),\n\t\t\t\"--preset\", x265_presets[preset][\"preset\"],\n\t\t\t\"--bitrate\", x265_presets[preset][\"bitrate\"],\n\t\t\t\"--deblock\", x265_presets[preset][\"deblock\"],\n\t\t\t\"-b\", \"12\",\n\t\t\t\"--psy-rd\", \"0.4\",\n\t\t\t\"--aq-strength\", \"0.5\",\n\t\t\t\"--stats\", stats_file\n\t\t]\n\t\tx265_pass1 = [\"--pass\", \"1\", \"-o\", \"/dev/null\"]\n\t\tx265_pass2 = [\"--pass\", \"2\", \"-o\", new_file_name]\n\t\tpass1_command = \" \".join(vspipe_command) + \" | \" + \" \".join(x265_command + x265_pass1)\n\t\tprint(pass1_command)\n\t\tprocess = subprocess.Popen(pass1_command, shell=True)\n\t\t(cout, cerr) = process.communicate()\n\t\texit_code = process.wait()\n\t\tif exit_code != 0: #0 is success.\n\t\t\traise Exception(\"First x265 pass failed with exit code {exit_code}.\".format(exit_code=exit_code))\n\t\tpass2_command = \" \".join(vspipe_command) + \" | \" + \" \".join(x265_command + x265_pass2)\n\t\tprint(pass2_command)\n\t\tprocess = subprocess.Popen(pass2_command, shell=True)\n\t\t(cout, cerr) = process.communicate()\n\t\texit_code = process.wait()\n\t\tif exit_code != 0: #0 is success.\n\t\t\traise Exception(\"Second x265 pass failed with exit code {exit_code}.\".format(exit_code=exit_code))\n\tfinally:\n\t\t#Delete old files and temporaries.\n\t\tfor file_name in [track_metadata.file_name, stats_file, vapoursynth_script] + sideeffect_files:\n\t\t\tif os.path.exists(file_name):\n\t\t\t\tos.remove(file_name)\n\n\ttrack_metadata.file_name = new_file_name\n\ttrack_metadata.codec = \"h265\"", "def disassemble(filepath, fps=None, frame_interval=None, loglevel='panic', image_ext='jpg'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. import manually and fix errors')\n raise\n # get video information\n video_props = Videos.get_info(filepath)\n if 'system' in video_props and \\\n 'nb_frames' in video_props['system'][0]:\n nb_frames = video_props['streams'][0]['nb_frames']\n else:\n try:\n import cv2\n except (ImportError, ModuleNotFoundError):\n logger.error(\n 'Import Error! Cant import cv2. '\n 'Annotations operations will be limited. 
import manually and fix errors')\n raise\n nb_frames = int(cv2.VideoCapture(filepath).get(cv2.CAP_PROP_FRAME_COUNT))\n\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n basename, ext = os.path.splitext(filepath)\n # create folder for the frames\n if os.path.exists(basename):\n shutil.rmtree(basename)\n\n os.makedirs(basename, exist_ok=True)\n\n if fps is None:\n try:\n fps = eval(video_props['streams'][0]['avg_frame_rate'])\n except ZeroDivisionError:\n fps = 0\n num_of_zeros = len(str(nb_frames))\n # format the output filename\n output_regex = os.path.join(basename, '%0{}d.{}'.format(num_of_zeros, image_ext))\n\n try:\n if frame_interval is not None:\n frame_number = 0\n select = \"\"\n while frame_number < nb_frames:\n if select != \"\":\n select += '+'\n select += 'eq(n\\\\,{})'.format(frame_number)\n frame_number += frame_interval\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_regex,\n **{'start_number': '0',\n 'vf': 'select=\\'{}'.format(select),\n 'vsync': 'vfr'})\n else:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_regex,\n **{'start_number': '0',\n 'r': str(fps)})\n\n ffmpeg.overwrite_output(stream).run()\n except Exception:\n logger.error('ffmpeg error in disassemble:')\n raise\n return basename", "def process_video(video_dir, save_dir):\n for sig_vid in tqdm(find_files(video_dir, '*.{}'.format(VID_FORMAT))):\n \n vc = cv2.VideoCapture(sig_vid) \n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n rig_bot_height, rig_bot_width = height // 2, width // 2\n\n if rig_bot_height == 540 and rig_bot_width == 960:\n # right bottom, r_h, l_w, r_w\n iou = [390, 90, 890]\n\n elif rig_bot_height == 720 and rig_bot_width == 1280:\n log.info('high resolution video, please confirm iou param')\n\n else:\n assert 'please confirm video resolution'\n\n count = 0\n cout_save = 0\n\n while vc: \n rval, frame = vc.read() \n\n if rval == True:\n count += 1\n # fisheye extract front preview\n ext_region = frame[rig_bot_height:, rig_bot_width:]\n cv2.imshow('ori frame', ext_region)\n\n key = cv2.waitKey(0) & 0xFF\n if key == ord('q'):\n break\n\n elif key == ord('s'): \n # Interval 20 frame save \n if cout_save % 20 == 0 or cout_save > 20: \n file_name = create_files(save_dir, sig_vid)\n img_res = process_frame(ext_region, iou)\n cv2.imwrite(os.path.join(save_dir, file_name)+\"/\"+ file_name+\"_{}.jpg\".format(count),img_res)\n cout_save = 0\n log.info('successful save current frame {}'.format(count))\n\n else:\n cout_save += 1\n continue\n cout_save += 1\n\n else:\n # skip current frame and cout pre save frame interval\n if cout_save > 0:\n cout_save += 1\n continue\n\n else:\n break\n \n vc.release()\n cv2.destroyAllWindows()", "def test_ffmpeg_in_path(self) -> None:\n self.assertIsNotNone(which('ffmpeg'))", "def screenDataToPNG(self, rawFile, destFile, ffmpeg):\n\n args = [ffmpeg, '-vcodec rawvideo', '-f rawvideo', '-pix_fmt rgb565', \n '-s 320*480', '-i', rawFile, '-f image2', '-vcodec png', '%s.png' % destFile]\n \n # Something tricky here, need args.split(' ')\n args = ' '.join(args)\n try:\n ffmpegProcess = subprocess.Popen(args.split(' '),\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n \n except OSError, osErr:\n raise EmulatorClientError('-Failed to run ffmpeg command \\'%s\\': %s' % (args, osErr.strerror),\n theCode=EmulatorClientError.FFMPEG_RUN_ERROR,\n theBaseError=osErr)\n except:\n exc = traceback.format_exc()\n 
self.log.exce(exc)\n retval = ffmpegProcess.communicate()\n\n #adb.wait() \n self.log.info('-Result: %s' % str(retval))\n return retval", "def show_video(path: str): \n video_path = sorted(glob(path + \"/*.mp4\"))[-1]\n video = io.open(video_path, 'r+b').read()\n encoded = base64.b64encode(video)\n\n return HTML(data='''<video alt=\"test\" controls>\n <source src=\"data:video/mp4;base64,{0}\" type=\"video/mp4\" /> </video>'''\n .format(encoded.decode('ascii')))", "def convert_to_ogg(self, path, filename):\n\n codec = \"libvorbis\"\n ogg_filename = filename + \".ogg\"\n\n command = [self.FFMPEG_BIN,\n \"-n\",\n \"-i\", path,\n \"-acodec\", codec,\n \"-aq\", \"60\",\n \"-vn\",\n \"-ac\", \"2\",\n ogg_filename\n ]\n\n return command", "def __init__(\n self, executable=\"ffmpeg\", global_options=None, inputs=None, outputs=None\n ):\n self.executable = executable\n self._cmd = [executable]\n\n global_options = global_options or []\n if _is_sequence(global_options):\n normalized_global_options = []\n for opt in global_options:\n normalized_global_options += shlex.split(opt)\n else:\n normalized_global_options = shlex.split(global_options)\n\n self._cmd += normalized_global_options\n self._cmd += _merge_args_opts(inputs, add_input_option=True)\n self._cmd += _merge_args_opts(outputs)\n\n self.cmd = subprocess.list2cmdline(self._cmd)\n self.process = None", "def obtenerVideo(camara):\n val, frame = camara.read()\n return val, frame", "def video2():\n return Response(gen_frames(2),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def analyze_movie(\n video_path, aspect_ratio=0, palette_size=32, frames=-1, step=1, show_frames=False, show_last_frame=False, color_format='hex'\n):\n\n # Parse video frame-by-frame\n vidcap = cv2.VideoCapture(video_path)\n success, image = vidcap.read()\n pil_img = None\n count = 0\n while success and frames == -1 or count < frames:\n if count % step == 0:\n # Convert to PIL image\n img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n pil_img = Image.fromarray(img)\n\n # Crop frame to remove border\n if aspect_ratio != 0:\n width, height = pil_img.size\n left = 0\n right = width\n content_height = 1/aspect_ratio * width\n border = (height - content_height) * 0.5\n top = border\n bottom = border + content_height\n pil_img = pil_img.crop((left, top, right, bottom))\n\n # Get primary color\n main_color = get_primary_color(\n pil_img, palette_size, show_img=show_frames)\n\n if color_format == 'hex':\n main_color = rgbToHex(main_color)\n \n print(main_color)\n\n # Attempt to read next frame\n success, image = vidcap.read()\n count += 1\n\n if show_last_frame:\n pil_img.show()" ]
[ "0.67609626", "0.6545024", "0.64391136", "0.6160051", "0.6078338", "0.60496306", "0.6016545", "0.6010689", "0.5968558", "0.59620136", "0.5842451", "0.583345", "0.5804066", "0.5784901", "0.5773912", "0.5729956", "0.56486654", "0.56443024", "0.5613024", "0.5551656", "0.54755616", "0.5473268", "0.54720014", "0.54596263", "0.54592025", "0.5432067", "0.5398187", "0.539504", "0.539504", "0.5384447", "0.5377688", "0.5371479", "0.53597385", "0.5356194", "0.5351204", "0.53294045", "0.5319955", "0.53134006", "0.5307954", "0.5307663", "0.53053725", "0.53029346", "0.5287678", "0.5286814", "0.5280297", "0.5273725", "0.5263608", "0.5253262", "0.5244481", "0.52393544", "0.5232301", "0.5224302", "0.521826", "0.5212703", "0.5212671", "0.5209241", "0.51978153", "0.5192144", "0.51715595", "0.5163106", "0.5153734", "0.51523376", "0.5152251", "0.5150039", "0.51428235", "0.5140191", "0.513434", "0.5124201", "0.5115328", "0.51129717", "0.5110497", "0.5103786", "0.5086081", "0.5076736", "0.50749946", "0.50726026", "0.5067549", "0.5064815", "0.5062916", "0.50593084", "0.5047005", "0.5046887", "0.5040207", "0.5038057", "0.5032801", "0.5030932", "0.5010395", "0.5009557", "0.5004829", "0.49981573", "0.49832684", "0.49806294", "0.4980268", "0.49742842", "0.49611625", "0.49296206", "0.49260753", "0.49254543", "0.49246085", "0.4923067" ]
0.6554189
1
Verifies credentials for username and password. Returns None on success or a string describing the error on failure. Adapt to your needs
def check_credentials(username, password): LDAP_SERVER = 'ldap://172.24.1.102:389' # fully qualified AD user name LDAP_USERNAME = '%s' % username # your password LDAP_PASSWORD = password base_dn = 'DC=LGE,DC=NET' ldap_filter = 'userPrincipalName=%s' % username attrs = ['memberOf'] #print "entered username : %s " % username #print "entered password : %s " % password try: # build a client ldap_client = ldap.initialize(LDAP_SERVER) #print ldap_client # perform a synchronous bind ldap_client.set_option(ldap.OPT_REFERRALS,0) ldap_client.simple_bind_s(LDAP_USERNAME, LDAP_PASSWORD) except ldap.INVALID_CREDENTIALS: ldap_client.unbind() return 'Wrong username or password' except ldap.SERVER_DOWN: return 'AD server not available' # all is well ldap_client.unbind() return "success"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_auth_password(self, username, password):\n return AUTH_FAILED", "def check_credentials(username, password):\n\t\n\tconn = sqlite3.connect('db/user_task.db')\n\tcursor = conn.execute(\"SELECT password from user WHERE username == \\'%s\\'\" % (username))\n\tdata = cursor.fetchall()\n\tconn.close()\n\n\tif len(data) == 0:\n\t\treturn u\"Incorrect username\"\n\n\tfor row in data:\n\t\tencoded_password = hashlib.sha1(password.encode('utf-8')).hexdigest()\n\t\tif row[0] == encoded_password:\n\t\t\treturn None\n\n\treturn u\"Incorrect password\"\n\t\n\t# An example implementation which uses an ORM could be:\n\t# u = User.get(username)\n\t# if u is None:\n\t# return u\"Username %s is unknown to me.\" % username\n\t# if u.password != md5.new(password).hexdigest():\n\t# return u\"Incorrect password\"", "def check_credentials(username, password):\n\n return db.auth_user(username, password)", "def check_auth(username, password, expected_user, expected_pw):\n return username == expected_user and password == expected_pw", "def verify_pw(username, password):\n credentials = HtpasswdFile(app.config[\"CREDENTIAL_FILE\"])\n if not credentials.check_password(username, password):\n logging.warning(\"%s tried to login with wrong password\", username)\n return False\n return True", "def check_auth_password(self, username, password):\n if username == self.username and password == self.password:\n return paramiko.AUTH_SUCCESSFUL\n return paramiko.AUTH_FAILED", "def check_auth(username, password):\n return username == USERNAME and password == PASSWORD", "def authenticate(self, username: str, password: str) -> Optional[str]:", "def authenticate(self, username: str, password: str) -> Optional[str]:", "def check_auth(username, password):\n return password == os.getenv('PASSWORD')", "def check_auth(username, password):\n\n config = get_app_configurations()\n\n with open(config[\"credentials\"], \"r\") as fh:\n u, p = fh.readline().rstrip().split(\",\")\n\n return username == u and password == p", "def check_auth(username, password):\n return username == c.id and password == c.pw", "def test_credentials(self):\r\n data = self._deep_clean('zekebarge@gmail.com')\r\n error = data.get(ERROR_CODE, None)\r\n if error in (1,2):\r\n raise InvalidCredentialsError(\"Credentials are invalid for user '{}'\".format(self._username))\r\n return True", "def check_auth(username, password):\n return username == os.environ['USERNAME'] and password == os.environ['PASSWORD']", "def check_auth(username, password):\n return username == 'asimov' and password == 'tagada72'", "def check_auth(username, password):\n return username == app.config['USERNAME'] and (\n password == app.config['PASSWORD'])", "def check_credentials(input_password, real_password):\n return pwd_context.verify(input_password, real_password)", "def check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']", "def check_auth(username, password):\n return basic_login(username, password)", "def check_auth(username, password):\n return username == 'sammy' and password == 'BasicPassword!'", "def validate_auth(username: str, password: str) -> Optional[str]:\n gate = Authentication.create_gateway()\n java_import(gate.jvm, \"com.splicemachine.shiro.SpliceDatabaseRealm.*\")\n realm = gate.jvm.com.splicemachine.shiro.SpliceDatabaseRealm()\n LOGGER.debug('Connection successful')\n realm.setServerName(os.environ['DB_HOST'])\n 
realm.setServerPort(\"1527\")\n realm.setDatabaseName(\"splicedb\")\n # when shiro authentication fails, it throws an error\n try:\n LOGGER.debug('Attempting login')\n realm.initialize(username, password)\n except Py4JJavaError as e:\n LOGGER.info('Login Failed')\n LOGGER.info(f'{e.errmsg}-{type(e)}: {e.java_exception}')\n return None\n LOGGER.debug('Login successful')\n return username", "def check_auth(username, password):\n return (username == app.config['USERNAME'] and\n password == app.config['PASSWORD'])", "def check_valid(self, username, password):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT username,password \\\n FROM users WHERE username = %s\", (username,))\n credentials = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n if credentials is None:\n return False\n if username != credentials[0]:\n return False\n if sha256_crypt.verify(password, credentials[1]):\n return True\n return False", "def check_auth(username, password):\n return username == 'jeffkoons' and password == 'likesweirdbaloons'", "def check_auth(username, password):\n return username == 'aweber' and password == 'aweber1100'", "def test_correct_credentials(self):\n with self.subTest(\"Valid credentials\"):\n valid_credentials = self._encode_basic_credentials(\n self.web_user.username, \"my_password\"\n )\n response = self.client.get(\n self.url, HTTP_AUTHORIZATION=f\"Basic {valid_credentials}\"\n )\n self.assertEqual(response.status_code, 401)\n\n with self.subTest(\"Invalid credentials\"):\n invalid_credentials = self._encode_basic_credentials(\n self.web_user.username, \"not_the_correct_password\"\n )\n response = self.client.get(\n self.url, HTTP_AUTHORIZATION=f\"Basic {invalid_credentials}\"\n )\n self.assertEqual(response.status_code, 401)", "def check_auth(username, password):\n return username == 'nicholas' and password == ADMIN_PASS", "def check_credentials(self, username, password):\n user = None\n if username != \"\":\n # Calling DB and fetching userdetails\n user = userdetails_API_query(username)\n print \"id \", user['_id']\n if user != None:\n #u = app.config['BASIC_AUTH_USERNAME'] = user['username']\n #pwd = app.config['BASIC_AUTH_PASSWORD'] = user['pw_hash']\n # print \" u & pwd\",username\n if user['username'] == username and check_password_hash(user['pw_hash'], password):\n g.user = user['_id'], username, user['email']\n return True\n print \"g.user\", g.user\n return False", "def check_auth(username, password):\n return username == 'admin' and password == 'pebble'", "def test_validate_credentials(self):\n pass", "def auth(username, password):\n return username == password", "def validate_authentication(self, username, password):\n return self.user_table[username]['pwd'] == password", "def test_authenticate_invalid_password(self):\r\n print(\"Authenticate user invalid password (wrong)\")\r\n username = \"admin\"\r\n password = \"password9999\"\r\n\r\n self.assertEqual(self.system.authenticate(username, password), False)", "def check_auth(username, password):\n return username == get_env('UPLOAD_USER') and password == get_env('UPLOAD_PASSWORD')", "def check_credentials_typo(credentials):\n regex_username = r'^[\\w\\.\\-]{2,}$'\n regex_password = r'[^.]{4,10}$'\n\n if not match(regex_username, credentials['username']):\n raise ValueError('invalid username typo')\n\n if not match(regex_password, credentials['password']):\n raise ValueError('invalid password typo')", "def check_auth(username, password):\n return username == 'admin' and password == 
'password'", "def check_auth(username, password):\n try:\n locust_username = os.environ['LOCUST_USER_NAME']\n locust_password = os.environ['LOCUST_PASSWORD']\n return username == locust_username and password == locust_password\n except:\n return True", "def login_user(username,password):\n \n return Credentials.verify_user(username,password)", "def check_auth(username, password):\n return username == 'admin' and password == 'worcester'", "def check_auth(username, password):\n return username == 'admin' and password == 'Passw0rd'", "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "def verify_credentials(self):\n try:\n self.api.VerifyCredentials()\n logging.info('Successfully verified')\n return True\n except TwitterError as e:\n logging.error('Error verifying credentials: %s', e.message[0]['message'])\n return False", "def check_auth(username, password):\n return get_ct_object(username, password) is not None", "def authenticate(self, username, password):\n return None", "def check_credentials(username, password):\n if not validate_username(username) or not validate_password(password):\n return False\n sql = \"SELECT password \" \\\n \"FROM users \" \\\n \"WHERE username=:username AND is_active=TRUE\"\n result = db.session.execute(sql, {\"username\": username})\n user = result.fetchone()\n if user is None:\n return False\n password_hash = user[0]\n if check_password_hash(password_hash, password):\n return True\n return False", "def check_auth(username, password):\n return username == 'admin' and password == 'root'", "def check_auth(username, password):\r\n return username == current_app.config['ADMIN_USERNAME'] \\\r\n and password == current_app.config['ADMIN_PASSWORD']", "def validate_authentication(self, username, password, handler):\n hash = md5(password).hexdigest()\n msg = \"Authentication failed.\"\n if not self.has_user(username):\n if username == 'anonymous':\n msg = \"Anonymous access not allowed.\"\n raise AuthenticationFailed(msg)\n if username != 'anonymous':\n if self.user_table[username]['pwd'] != hash:\n raise AuthenticationFailed(msg)", "def check_auth(_, http_password):\n return (password is not None) and (password == http_password)", "def validate_authentication(self, username, password, handler):\n user = connection.User.find_one({ 'email' : str(username) , 'apikey' : str(password)})\n if user is None:\n msg = \"Authentication failed.\"\n raise AuthenticationFailed(msg)", "def test_failure(self):\n \n result = self.authenticator.authenticate(\n username=u'thruflo', \n password=u'wrong'\n )\n self.assertTrue(result is None)", "def check_auth(username, password):\n return username == 'admin' and password == 'admin'", "def check_auth(username, password):\n return username == current_app.config['DOC_USERNAME'] and password == current_app.config['DOC_PASSWORD']", "def authorise_login(self, username, password):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT username,password \\\n FROM users WHERE password = %s\", (password,))\n credentials = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n if credentials is None:\n return False\n if username != credentials[0]:\n return False\n if password != credentials[1]:\n return False\n return True", "def _credentials_are_valid(self, username, password):\n LDAP_SERVER = 'ldap://xxx.xxx.xxx' # EDIT THIS\n LDAP_USERNAME = '%s@xxx.com' % username # EDIT THIS\n 
LDAP_PASSWORD = password\n\n try:\n # build a client\n ldap_client = ldap.initialize(LDAP_SERVER)\n # perform a synchronous bind\n ldap_client.set_option(ldap.OPT_REFERRALS, 0)\n ldap_client.simple_bind_s(LDAP_USERNAME, LDAP_PASSWORD)\n except ldap.INVALID_CREDENTIALS:\n ldap_client.unbind()\n # Wrong username or password\n return False\n except ldap.SERVER_DOWN:\n # AD server not available\n return False\n # all is well\n ldap_client.unbind()\n # Login successful\n return True", "def test_user1_method4():\n assert u.verify_password(USER_CREDENTIALS[\"password\"]), \"Password cannot verify properly\"", "def check_auth(username, password):\n ADMIN_USER = config.CONFIG_VARS['ADMIN_USER']\n ADMIN_PASS = config.CONFIG_VARS['ADMIN_PASS']\n return username == ADMIN_USER and password == ADMIN_PASS", "def check_auth(username, password):\n return username == 'daniel' and password == config['redis_auth_key']", "def test_authentication_success():\n d = Dexcom(USERNAME, PASSWORD)\n d._validate_account()\n d._validate_session_id()", "async def check_password(self, login, password):", "def verify_password(self, username, password):\n\n try:\n self.c.execute('SELECT password FROM profiles WHERE name=(?)', (username,))\n\n db_pw = self.c.fetchone()[0]\n print(password)\n\n return db_pw == password\n\n except TypeError:\n return False", "def authenticate():\n return Response('Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def check_credentials_validation(credentials):\n spec = {'_id': credentials['username'], 'password': credentials['password']}\n if not current_app.mongo.observer.users.find_one(spec):\n raise Unauthorized('invalid credentials')", "def authenticate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n user = self.username.data\n\n cur = get_cursor()\n if email_exists(cur, user):\n user = get_username(cur, user)\n\n if username_exists(cur, user):\n pw_hash = get_pw_hash(cur, user)\n\n if check_password(self.password.data, pw_hash):\n self.username.data = user\n return True\n\n return False", "def is_valid_login(self, username, password):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_IS_LOGIN_INFORMATION_VALID, username + '|' + password)", "def check_credentials(login_details):\n\n app.logger.info(\"Login Details: {}\".format(pformat(ModelTools.redact_dict(login_details))))\n\n username = login_details['username']\n password = login_details['password']\n\n if (username == 'admin@your.company' and password == 'pass1'):\n user = User.query.filter(User.email == username).one_or_none()\n schema = ExtendedUserSchema(many=False)\n if user is not None:\n app.logger.debug('LOGIN accepted!');\n schema = ExtendedUserSchema(many=False)\n data = schema.dump(user)\n return data, 200\n\n if (username == 'dev@your.company' and password == 'pass2'):\n user = User.query.filter(User.email == username).one_or_none()\n if user is not None:\n app.logger.debug('LOGIN accepted!');\n schema = ExtendedUserSchema(many=False)\n data = schema.dump(user)\n return data, 200\n\n app.logger.warning('LOGIN FAILED!');\n abort(\n 401, \"Unauthorised! 
{}\".format(ModelTools.redact_dict(login_details))\n )", "def validate_login(self, username, password):\n user = User(self).get(username)\n return user and user['Password'] == password", "def check_auth(username, password):\n return username == 'admin' and password in app.config[\"CLAIM_SECRETS\"]", "def verify_pw(username, password):\n global password_store\n logger = logging.getLogger('verify_pw')\n if not password_store:\n logger.error(\"No password store specified\")\n return False\n logger.debug(\"Verifying password for %s\" % username)\n return password_store.verify(username, password)", "def invalid_credentials( form , field ): \n\tusername_entered = form.username.data\n\tpassword_entered = field.data \n\tuser_object = User.query.filter_by(username = username_entered).first()\n\tif user_object is None : \n\t\traise ValidationError(\"Username or Password is incorrect !\")\n\telif not pbkdf2_sha256.verify(password_entered , user_object.password) : \n\t\traise ValidationError(\"Username or Password is incorrect !\")", "def authenticate(username, password):\n test = User.load(username)\n test_password = test.password\n input_password = md5.new(\n password + config.get('security', 'salt')).digest()\n if input_password == test_password:\n return True\n else:\n return False", "def verify_password(username, password):\n if username in user_auth and check_password_hash(user_auth.get(username), password):\n return username", "def authenticate(credentials):", "def has_credentials(self):\n return self.username and self.password and self.url and self.xml_rpc", "def validate(username: str, password: str) -> dict:\n validation_check = {}\n \n # Check to see if the username was left blank\n if username.strip() == \"\":\n validation_check[\"success\"] = False\n validation_check[\"username\"] = \"Username cannot be left blank.\"\n\n # Check to see if the username is taken\n elif not sql.is_username_taken(username):\n validation_check[\"success\"] = False\n validation_check[\"username\"] = \"Username is incorrect\"\n\n # Check to see if the password was left blank\n if password.strip() == \"\":\n validation_check[\"success\"] = False\n validation_check[\"password\"] = \"Password cannot be left blank.\"\n\n\n if not validation_check.get(\"success\", True):\n return validation_check\n\n else:\n return sql.verify_credentials(username, password)", "def authenticate(self, username, password):\n user = self.db.get_user(username)\n print(user)\n\n if user is None:\n self.__deny_state()\n\n if not self.argon2.verify(user[1], password):\n self.__deny_state()\n\n self.__accept_state()", "def authenticate():\n\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n get_auth_headers())", "def authenticate(username, password):\r\n global _username\r\n global _password\r\n _username = username\r\n _password = _hash(password)\r\n try:\r\n r = _call_function('pico', 'authenticate', locals())\r\n return True\r\n except Exception, e:\r\n r = str(e)\r\n if r.startswith('Bad nonce.'):\r\n global _td\r\n _td = int(r.split('Bad nonce. 
The time difference is:')[-1])\r\n print(r)\r\n authenticate(username, password)\r\n else:\r\n print(r)\r\n return False", "def authenticate():\r\n return Response(\r\n 'Could not verify your access level for that URL.\\n'\r\n 'You have to login with proper credentials', 401,\r\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials',\n 401,\n {\n 'WWW-Authenticate': 'Basic realm=\"Login Required\"'\n }\n )", "def auth(self, username, password):\n return False", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def test_authenticate_invalid_username(self):\r\n print(\"Authenticate user invalid username\")\r\n username = \"test9999user\"\r\n password = \"password\"\r\n\r\n self.assertEqual(self.system.authenticate(username, password), False)", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login 
Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})" ]
[ "0.7673481", "0.7532174", "0.74800336", "0.7391194", "0.7332672", "0.72637135", "0.72405916", "0.7238133", "0.7238133", "0.7210242", "0.7194319", "0.71895075", "0.7168887", "0.71352327", "0.712828", "0.71126574", "0.71101016", "0.7096003", "0.70950186", "0.7062774", "0.70513785", "0.7039052", "0.7000648", "0.6988051", "0.6953707", "0.6937204", "0.69354504", "0.6924307", "0.6914179", "0.69131744", "0.6891133", "0.6869527", "0.684181", "0.6833909", "0.6831865", "0.6812353", "0.6811999", "0.6803938", "0.6794219", "0.6790416", "0.67850673", "0.67850673", "0.6783729", "0.67788535", "0.6778047", "0.6774992", "0.6768805", "0.67635274", "0.6741905", "0.6738866", "0.67358595", "0.67348063", "0.67304784", "0.6717364", "0.6681482", "0.6650141", "0.6647545", "0.6635553", "0.663139", "0.6630542", "0.662339", "0.6613164", "0.6606401", "0.66044044", "0.65881824", "0.65871423", "0.65816045", "0.6571707", "0.6561072", "0.655565", "0.65455484", "0.65385383", "0.65351784", "0.6534942", "0.65266854", "0.65232325", "0.6519317", "0.65116113", "0.65019107", "0.6495118", "0.647962", "0.6469697", "0.64677703", "0.64635265", "0.64635265", "0.64635265", "0.64635265", "0.64635265", "0.64635265", "0.64635265", "0.64635265", "0.64614856", "0.64518344", "0.64518344", "0.64518344", "0.64518344", "0.64518344", "0.64518344", "0.64518344", "0.64518344", "0.64518344" ]
0.0
-1
Initialize component which does the ligand depiction. If None is provided as parameters, just the default RDKit functionality is going to be used.
def __init__( self, pubchem_templates_path: str = "", general_templates_path: str = config.general_templates, ) -> None: self.coordgen_params = rdCoordGen.CoordGenParams() self.coordgen_params.coordgenScaling = 50 / 1.5 self.coordgen_params.templateFileDir = config.coordgen_templates self.pubchem_templates = ( pubchem_templates_path if os.path.isdir(pubchem_templates_path) else "" ) self.templates: Dict[str, rdkit.Chem.rdchem.Mol] = OrderedDict() if os.path.isdir(general_templates_path): for k in sorted(os.listdir(general_templates_path)): template = self._load_template(os.path.join(general_templates_path, k)) template_name = k.split(".")[0] self.templates[template_name] = template
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(self):\n self.ros.enable()\n self.phone_link.enable()", "def __init__(self, lbda=None, bandname=None, zp=None, \n mjd=None, empty=False,**kwargs):\n self.__build__()\n if empty:\n return\n prop = kwargs_update(dict(lbda=lbda, bandname=bandname,mjd=mjd, zp=zp),\n **kwargs)\n self.create(**prop)", "def initialization_labyrinth(self):\n\n self.set_datas_from_labyrinth()\n self.set_connection_between_nodes()\n self.set_labyrinth_statistics_from_labyrinth()", "def initiate(self, DNA, Pol, Hel):", "def __init__ (self, nTel=0, centroid=(-0.5, 0.5), length=(0.005, 0.09), width=(0.0005, 0.003), psi=(0, 360), nsb=(10, 60)) :\n super().__init__ (nTel)\n self.centroid = [centroid, centroid]\n self.length, self.width = length, width\n self.psi = psi\n self.nsb = nsb\n self.tab_inj, self.mat_event, self.geom = None, None, None\n self.load_telescope()", "def __init__(self, dualgan:nn.Module, l_adv:float=1., l_rec:float=1., l_idt:float=0.):\n super().__init__()\n store_attr()", "def __init__(self, list_of_rbms):\n super(DBN, self).__init__(list_of_layers=list_of_rbms)", "def __init__(self, parent=None):\n ScriptedLoadableModuleWidget.__init__(self, parent)\n VTKObservationMixin.__init__(self) # needed for parameter node observation\n self.logic = None\n self._parameterNode = None\n self.T1_LLN_Node = None\n self.T1_LLE_Node = None\n self.ECVMapNode = None\n self.LLE_Node = None\n self.LLN_Node = None\n self.ArefNode = None\n self.T1_LLE_Name = 'T1 Enhanced'\n self.T1_LLN_Name = 'T1 Native'\n self.ResetSliceViews()\n self.LinkSlices()\n self.ColorBarEnabled()\n self.setupVolumeNodeViewLayout()\n self.Warning = True", "def __init__(self, \n xml_path=cfg.PERSON_REID_XML,\n bin_path=cfg.PERSON_REID_BIN):\n self.__net = cv.dnn.readNet(xml_path, bin_path)\n self.__net.setPreferableTarget(cv.dnn.DNN_TARGET_MYRIAD)", "def __init__(self, native_state=None,\n cutoff_rmsd=0.2,\n initial_states=None,\n initial_weights=None,\n ligand_idxs=None,\n binding_site_idxs=None,\n **kwargs):\n\n super().__init__(initial_states=initial_states,\n initial_weights=initial_weights,\n ligand_idxs=ligand_idxs,\n receptor_idxs=binding_site_idxs\n **kwargs)\n\n # test inputs\n assert native_state is not None, \"Must give a native state\"\n assert type(cutoff_rmsd) is float\n\n native_state_d = native_state.dict()\n\n # save the native state and center it around it's binding site\n native_state_d['positions'] = center_around(native_state['positions'], binding_site_idxs)\n\n native_state = WalkerState(**native_state_d)\n\n # save attributes\n self._native_state = native_state\n self._cutoff_rmsd = cutoff_rmsd", "def __init__(self, lead):\n self.lead = lead\n\n if not self.lead.is_1DNN():\n raise ValueError(\"Not a 1D NN tight binding\")\n if not self.lead.is_square():\n raise ValueError(\"Not a square TightBinding\")\n\n self.gf_r = None", "def __initialize_nlp(self, nlp):\n nlp[\"nbQ\"] = 0\n nlp[\"nbQdot\"] = 0\n nlp[\"nbTau\"] = 0\n nlp[\"nbMuscles\"] = 0\n nlp[\"plot\"] = {}\n nlp[\"var_states\"] = {}\n nlp[\"var_controls\"] = {}\n nlp[\"CX\"] = self.CX\n nlp[\"x\"] = nlp[\"CX\"]()\n nlp[\"u\"] = nlp[\"CX\"]()\n nlp[\"J\"] = []\n nlp[\"g\"] = []\n nlp[\"g_bounds\"] = []\n nlp[\"casadi_func\"] = {}", "def init(self):\n self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.IL_MOTOR_ID)\n self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.IR_MOTOR_ID)\n self.l_motor.initialize(\n inverted=False, encoder=False, phase=False, name=\"Intake Left\")\n self.r_motor.initialize(\n inverted=True, encoder=False, phase=False, 
name=\"Intake Right\")", "def __init__(self, flag_band: FlagBand,\n layer: \"datacube_ows.ows_configuration.OWSNamedLayer\") -> None:\n super().__init__({})\n self.layer = layer\n self.bands: Set[str] = set()\n self.bands.add(flag_band.pq_band)\n self.flag_bands = {flag_band.pq_band: flag_band}\n self.product_names = tuple(flag_band.pq_names)\n self.ignore_time = flag_band.pq_ignore_time\n self.declare_unready(\"products\")\n self.declare_unready(\"low_res_products\")\n self.manual_merge = flag_band.pq_manual_merge\n self.fuse_func = flag_band.pq_fuse_func\n # pyre-ignore[16]\n self.main_product = self.products_match(layer.product_names)", "def __init__(self):\n\n super().__init__(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=UnimodalVirtualSensorModel(\n virtual_sensor_model=[\n DoorVirtualSensorModel(modalities={\"image\"}),\n DoorVirtualSensorModel(modalities={\"pos\", \"sensors\"}),\n ],\n state_dim=3,\n ),\n )", "def __init__(self, rings=False, branches=False):\n self.rings = rings\n self.branches = branches", "def __init__(self, ns):\n\n ns.sort()\n self.ns = ns\n self.ngrams = {}\n self.feature_list = []\n self.name = 'NgramExtractorBase'", "def __init__(self,\n lowercase=False,\n ngram_range=1,\n unknown_label_id=0,):\n self._lowercase = lowercase\n self._ngram_range = ngram_range\n self._unknown_label_id = unknown_label_id", "def __init__(self, sgrna_fc, library, copy_number, exclude_heterochromosomes=False):\n self.library = library\n self.library.index.name = \"index\"\n\n self.sgrna_fc = sgrna_fc\n self.copy_number = copy_number\n self.gpr = None\n self.exclude_heterochromosomes = exclude_heterochromosomes", "def __init__(self, initial_states=None,\n initial_weights=None,\n ligand_idxs=None,\n receptor_idxs=None,\n **kwargs):\n\n # make sure necessary inputs are given\n assert initial_states is not None, \"Must give a set of initial states\"\n assert ligand_idxs is not None, \"Must give ligand indices\"\n assert receptor_idxs is not None, \"Must give binding site indices\"\n\n self._initial_states = initial_states\n self._ligand_idxs = ligand_idxs\n self._receptor_idxs = receptor_idxs\n\n # we want to choose initial states conditional on their\n # initial probability if specified. 
If not specified assume\n # assume uniform probabilities.\n if initial_weights is None:\n self._initial_weights = [1/len(initial_states) for _ in initial_states]\n else:\n self._initial_weights = initial_weights", "def __init__(self):\n\n super().__init__(\n filter_models=[\n DoorKalmanFilter(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=DoorVirtualSensorModel(modalities={\"image\"}),\n ),\n DoorKalmanFilter(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=DoorVirtualSensorModel(\n modalities={\"pos\", \"sensors\"}\n ),\n ),\n ],\n state_dim=3,\n )", "def __init__(self, nclasses, device):\n super(HybridNN, self).__init__(nclasses, device)\n self.data_dev = qml.device(device, wires=self.req_qub_out)\n self.device = device\n self.model_dev = None\n self.nn = None\n self.bias = True", "def __init__ ( self ) :\n\n self.m_dn = self.configInt ('dn', 100)\n self.m_print_bits = self.configInt ('print_bits', 1)\n\n if self.m_print_bits & 1 : self.print_input_pars()\n\n self.counter = 0\n self.counter_dn = 0", "def Initialize(self):\n return _gmat_py.NadirPointing_Initialize(self)", "def initialize_dlib(facial_landmark_predictor:str):\r\n print('Loading facial landmark predictor...')\r\n detector = dlib.get_frontal_face_detector()\r\n predictor = dlib.shape_predictor(FACIAL_LANDMARK_PREDICTOR)\r\n\r\n return detector, predictor", "def initialize():\n dislin.disini()", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)", "def setupLL_Native(self):\n self.LLN_Selector = slicer.qMRMLNodeComboBox()\n self.LLN_Selector.nodeTypes = ['vtkMRMLMultiVolumeNode']\n self.LLN_Selector.noneEnabled = True\n self.LLN_Selector.setMRMLScene(slicer.mrmlScene)\n self.LLN_Selector.addEnabled = 0\n self.LLN_SelectorLabel = qt.QLabel('Native Look Locker')\n self.LLN_Selector.setToolTip(\"Select the pre contrast Look Locker to create the T1 Mapping\")\n self.InputOutput_Layout.addRow(self.LLN_SelectorLabel, self.LLN_Selector)", "def __init__(self, initial_state=None,\n cutoff_distance=1.0,\n topology=None,\n ligand_idxs=None,\n receptor_idxs=None,\n periodic=True,\n **kwargs):\n\n # since the super class can handle multiple initial states we\n # wrap the single initial state to a list.\n super().__init__(initial_states=[initial_state],\n ligand_idxs=ligand_idxs,\n receptor_idxs=receptor_idxs,\n **kwargs)\n\n # test input\n assert topology is not None, \"Must give a reference topology\"\n assert type(cutoff_distance) is float\n\n self._cutoff_distance = cutoff_distance\n self._topology = topology\n\n # convert the json topology to an mdtraj one\n self._mdj_top = json_to_mdtraj_topology(self._topology)\n\n # whether or not to use the periodic box vectors in the\n # distance calculation\n self._periodic = periodic", "def init_geofence(self):\n\t\tself.create_ring()\n\t\tself.create_geofence()", "def __init__(self, atmo_fln, data_fln, ndiv, read=True, oldchi=False):\n # Calling the parent class\n Photometry.__init__(self, atmo_fln, data_fln, ndiv, read=read)\n self._Init_lightcurve(ndiv)", "def __init__(self):\n # Lynx Dimensions in mm\n self.L1 = 76.2 # distance between joint 0 and joint 1\n self.L2 = 146.05 # distance between joint 1 and joint 2\n self.L3 = 187.325 # distance between 
joint 2 and joint 3\n self.L4 = 34 # distance between joint 3 and joint 4\n self.L5 = 34 # distance between joint 4 and center of gripper\n\n # Joint limits\n self.lowerLim = np.array([-1.4, -1.2, -1.8, -1.9, -2.0, -15]).reshape((1, 6)) # Lower joint limits in radians (grip in mm (negative closes more firmly))\n self.upperLim = np.array([1.4, 1.4, 1.7, 1.7, 1.5, 30]).reshape((1, 6)) # Upper joint limits in radians (grip in mm)", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(2)\n ]\n self.norms = ModuleList(norms_list)", "def __init__(self, plasma_parent):\n super(LevelNumberDensityHeNLTE, self).__init__(plasma_parent)\n self.calculate = self._calculate_helium_nlte\n self._update_inputs()\n self.initialize_indices = True", "def initNode():\n\n # 0) General Setup\n #initialize listener node!\n rospy.init_node('main', anonymous=True)\n\n #Create instances of subscriber objects\n joint_state_sub = rospy.Subscriber(\"joint_states\", JointState, joint_state_callback)", "def __init__(self, *args):\n _snap.TFltKd_swiginit(self, _snap.new_TFltKd(*args))", "def __init__(self, input_dim):\n super(ps_FNNDenoiser, self).__init__()\n\n self._input_dim = input_dim\n\n self.fnn_enc = Linear(self._input_dim, int(self._input_dim / 2))\n self.fnn_dec = Linear(int(self._input_dim / 2), self._input_dim)\n\n self.initialize_module()", "def __init__(self, resonanceParameters=None, scatteringRadius=None, **kwargs):\n\n index = 0\n for attr in self.optAttrList:\n setattr( self, attr, kwargs.get(attr) )\n if self.computeAngularDistribution:\n self.computeAngularDistribution = bool(self.computeAngularDistribution)\n\n self.resonanceParameters = resonanceParameters or []\n if self.resonanceParameters:\n self.resonanceParameters.setAncestor( self )\n self.scatteringRadius = scatteringRadius\n if self.scatteringRadius: self.scatteringRadius.setAncestor( self )\n ancestryModule.ancestry.__init__( self )", "def _init_layers(self):\n self._init_predictor()\n if self.use_edge_fusion:\n self._init_edge_module()", "def __init__(\n self,\n operon_id: str,\n left_right: Tuple[int, int],\n strand: int, \n reference_sequence: Seq, \n name: str = None\n ):\n\n super().__init__(\n 'operon',\n left_right=left_right,\n strand=strand,\n reference_sequence=reference_sequence,\n name=name\n )\n self.id = operon_id\n self.reading_frame = get_reading_frame(self.location, len(reference_sequence))\n\n # this is only set after link_transcription_unit is run at least once\n self.transcription_units = []", "def __init__(self):\n\n ser_path = get_project_path() + '/nltk_libs/english.all.3class.distsim.crf.ser'\n jar_path = get_project_path() + '/nltk_libs/stanford-ner-3.8.0.jar'\n\n self.st = StanfordNERTagger(ser_path, jar_path)", "def __init__(self):\n super().__init__()\n self.p = None\n self.type = 'NDInverseWeight'", "def __init__(self, kBoundedRing):\n KBoundedQuotientBasis.__init__(self, kBoundedRing, 'dks')\n\n kHLP = kBoundedRing.kHallLittlewoodP()\n self.module_morphism(self._dks_to_khlp_on_basis,codomain=kHLP).register_as_coercion() # morphism from dual-k-Schurs to k-bounded-HLP\n kHLP.module_morphism(self._khlp_to_dks_on_basis,codomain=self).register_as_coercion() # morphism from k-bounded-HLP to dual-k-Schurs", "def __init__(self, strName, lstDomain):\n # assign the name of the feature represented by the node\n self.name 
= str(strName)\n # assign the domain of the feature\n self.domain = lstDomain\n # the value starts out as undefined\n self.value = \"none\"", "def __init__(self, *args):\n super(LinearND, self).__init__()\n self.fc = nn.Linear(*args)", "def __init__(self, por): \n logger.debug(\"Entering in ocentricWKT constructor\") \n super(OcentricWKT, self).__init__(\n por.getElement(OcentricMetadata.GEO_GCS_NAME),\n por.getElement(OcentricMetadata.DATUM_NAME),\n por.getElement(OcentricMetadata.ELLIPSOIDE_NAME),\n por.getElement(OcentricMetadata.RADIUS),\n por.getElement(OcentricMetadata.INVERSE_FLATTENING),\n por.getElement(OcentricMetadata.AUTHORITY_NAME),\n por.getElement(OcentricMetadata.AUTHORITY_CODE)\n ) \n logger.debug(\"Exiting from ocentricWKT constructor\")", "def __init__(self, *args):\n _snap.TModeNetNodeI_swiginit(self, _snap.new_TModeNetNodeI(*args))", "def antenny_init_components(self):\n if self.antenny_config is None:\n print(\"Please load a config before initializing components\")\n if not self.antenny_config.check():\n print(\"Config {} is not valid, failed to initialize\".format(self.antenny_config.get_name()))\n print(\"If you believe this is an error, or you have modified the base components of the antenny board, \"\n \"please check Config class as well as the default configs for more details.\")\n\n self.imu_init()\n self.pwm_controller_init()\n self.elevation_servo_init()\n self.azimuth_servo_init()\n self.screen_init()\n self.gps_init()\n self.telemetry_init()\n self.platform_init()", "def __init__(self, num_radial, cutoff, envelope_exponent=6) -> None:\n\n super(BesselBasisLayer, self).__init__()\n self.cutoff = cutoff\n self.envelope = Envelope(envelope_exponent)\n\n self.freq = torch.nn.Parameter(torch.Tensor(num_radial))\n\n self.reset_parameters()", "def initialise(self, **kwargs):\n pass", "def __init__(self):\n super(GELU, self).__init__()", "def initialize(self, butler):\n self.getRawMd(butler)\n self.getCalexp(butler)\n self.getPfsArm(butler)", "def __init__(self, name, nlp):\n if not Doc.has_extension(\"flesch_kincaid_grade_level\"):\n Doc.set_extension(\"flesch_kincaid_grade_level\", getter=self.fk_grade)\n\n if not Doc.has_extension(\"flesch_kincaid_reading_ease\"):\n Doc.set_extension(\"flesch_kincaid_reading_ease\", getter=self.fk_ease)\n\n if not Doc.has_extension(\"dale_chall\"):\n Doc.set_extension(\"dale_chall\", getter=self.dale_chall)\n\n if not Doc.has_extension(\"smog\"):\n Doc.set_extension(\"smog\", getter=self.smog)\n\n if not Doc.has_extension(\"coleman_liau_index\"):\n Doc.set_extension(\"coleman_liau_index\", getter=self.coleman_liau)\n\n if not Doc.has_extension(\"automated_readability_index\"):\n Doc.set_extension(\"automated_readability_index\", getter=self.ari)\n\n if not Doc.has_extension(\"forcast\"):\n Doc.set_extension(\"forcast\", getter=self.forcast)\n\n if not Doc.has_extension(\"word_entropy\"):\n Doc.set_extension(\n \"word_entropy\", getter=lambda x: self.word_entropy(x, lemmatized=False)\n )\n\n if not Doc.has_extension(\"word_entropy_l\"):\n Doc.set_extension(\n \"word_entropy_l\", getter=lambda x: self.word_entropy(x, lemmatized=True)\n )\n\n if not Doc.has_extension(\"lix\"):\n Doc.set_extension(\"lix\", getter=self.lix)", "def __init__(self, n_components=None):\n self.n_components = n_components", "def __init__(self, *args):\n _snap.TModeNet_swiginit(self, _snap.new_TModeNet(*args))", "def __init__(self,layers,activations):\n model = utils.buildMLP(layers, activations)\n super().__init__(torch.nn.Sequential(model), nnType='dnn')", 
"def __init__(self, *args):\n _snap.TUndirNetNodeI_swiginit(self, _snap.new_TUndirNetNodeI(*args))", "def __init__(self):\n self.lattices = []\n self.meshfns = []", "def __init__(self, *args):\n _snap.TNEANetAFltI_swiginit(self, _snap.new_TNEANetAFltI(*args))", "def __init__(self, **kwargs):\n name, parameters, enabled, kwargs = util.directivenode_kwargs(kwargs)\n self.name = name\n self.parameters = parameters\n self.enabled = enabled\n\n super().__init__(**kwargs)", "def initialize_default(self):\n self.initialize_navigation()\n self.initialize_viewport()", "def initial_representations():\n cmd.hide('everything', 'all')\n cmd.show('cartoon', 'all')\n cmd.select('ligand', 'resn NFT')\n cmd.deselect()\n cmd.show(\"sticks\", \"ligand\")", "def initialize(self, *args, **kwargs):\n pass", "def __init__(self, params):\r\n _params = {\r\n 'min_pct': 75.0,\r\n 'min_len': 150,\r\n 'blast_db': None,\r\n 'template_filepath': None,\r\n 'pairwise_alignment_method': 'blast',\r\n 'Application': 'PyNAST',\r\n 'Algorithm': 'NAST',\r\n }\r\n _params.update(params)\r\n Aligner.__init__(self, _params)", "def __init__(self, shine_dalgarno_id: str, left_right: Tuple[int, int], gene_id: str):\n super().__init__('shine_dalgarno', left_right=left_right)\n self.id = shine_dalgarno_id\n self.gene_id = gene_id\n\n # populated by link_gene only\n self.gene = None", "def __init__(self, code, name=None, llh=None, xyz=None, xieta=None, geoidhgt=None):\n self._code = code\n self._name = name or code\n self._xyz = None\n self.setXiEta(xieta)\n self.setGeoidHeight(geoidhgt)\n self._xyz = None\n if llh is not None:\n self.setLatLonHgt(llh)\n elif xyz is not None:\n self.setXYZ(xyz)", "def __init__(self, *args):\n _snap.TFltStrKd_swiginit(self, _snap.new_TFltStrKd(*args))", "def __init__(self, *args, **kwargs):\n super(LinlLis, self).__init__(\n ('linl', Bits(maxlen=4)),\n ('lis', Bits(maxlen=4)),\n *args, **kwargs\n )", "def InitLightBasic(self):\r\n\t\t\r\n\t\taLight = AmbientLight(\"AmbientLight\")\r\n\t\taLight.setColor(Vec4(0.3, 0.3, 0.3, 1))\r\n\t\trender.setLight(render.attachNewNode(aLight))\r\n\t\r\n\t\tdLight1 = DirectionalLight(\"DirectionalLight1\")\r\n\t\tdLight1.setColor(Vec4(0.65, 0.6, 0.6, 1))\t\t\r\n\t\tdLight1NP = render.attachNewNode(dLight1)\r\n\t\tdLight1NP.setHpr(100, -40, 0)\r\n\t\trender.setLight(dLight1NP)\r\n\t\r\n\t\tdLight2 = DirectionalLight(\"DirectionalLight2\")\r\n\t\tdLight2.setColor(Vec4(0.35, 0.35, 0.3, 1))\r\n\t\tdLight2NP = render.attachNewNode(dLight2)\r\n\t\tdLight2NP.setHpr(150, -60, 0)\r\n\t\trender.setLight(dLight2NP)", "def __init__(self, n_components):\n self.n_components = n_components", "def _initialize(self):\n \n self.view.lineEdit_3.setText(\"C,H,N,O,P,S\")\n self.view.spin_hit.setValue(20)\n self.view.lineEdit_2.setValue(10.)\n self.view.checkBox_8.setChecked(True)", "def _pre_init(self, **kwargs) -> None:\n self._pan_res = 0.5\n self._ms_res = 2.0\n self.needs_extraction = False\n self._proj_prod_type = [Sv1ProductType.L1B]\n self._raw_units = RawUnits.DN\n\n # Post init done by the super class\n super()._pre_init(**kwargs)", "def __init__(self, use_nonlinear_noise_model=False, scale=[1,1,1]):\n self.use_nonlinear_noise_model=use_nonlinear_noise_model\n self.scale = scale\n super(CasadiUnicycleDynamics, self).__init__(3, 2)", "def __init__(self,opt):\n super(SNPatchDiscriminator, self).__init__()\n # if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n # use_bias = norm_layer.func == nn.InstanceNorm2d\n # else:\n # use_bias 
= norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n n_layers = 3\n ndf = opt.ndf\n use_bias = True\n sequence = [nn.utils.spectral_norm(nn.Conv2d(opt.input_nc, ndf, kernel_size=kw, stride=2, padding=padw)), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 4)\n sequence += [\n nn.utils.spectral_norm(nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias)),\n # norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n self.model = nn.Sequential(*sequence)", "def initialise(self):", "def init(self, parameters):\n pass", "def initGrid( self, name, suffix, n, ni, nj, ifields=[],rfields=[], lsize=10):\n #print \"initGrid: initializing %s\"%(name)\n self.name = name\n self.suffix = suffix\n self.n = n\n self.ni = ni\n self.nj = nj\n self.ifields = ifields\n self.rfields = rfields\n #print \"ifields=%s\\nrfields=%s\\nlsize=%d\"%(temp_ifields, temp_rfields, lsize)\n self.lgrid = attributevector.AttributeVector( ifields, rfields, lsize )\n #print \"allocating a temp array...\"\n temp = Numeric.zeros( lsize, Numeric.Float64 )\n #temp = -9999.0\n #print \"Filling real fields with default values...\"\n for f in rfields:\n #print \"\\tFilling field\",f,\":\",\n self.lgrid.importRAttr( f, temp )\n #print \"... OK!\"\n print \"initGrid: Initialized Grid!\"\n # setup av complete\n return", "def __init__(self, lemmas_info):\n self._lemmas_info = lemmas_info\n self._graph = self._create_nx_graph()", "def __init__(self, *args):\n _snap.TUndirNet_swiginit(self, _snap.new_TUndirNet(*args))", "def __init__ ( self , syms , defn ):\n\n self.logic = cognitiveDefiner.convertDefinition(syms,defn)", "def initialise_network(self):\n raise NotImplementedError", "def init():\n return _nfc.init()", "def initialize(self):\n\t\tpass", "def __init__(self, radius, length, power, pnnl_mats, thick_refl=15):\n self.z = length\n self.r = radius\n self.refl_t = thick_refl\n self.Q_therm = power * self.kW_to_MW\n self.matlib = pnnl_mats", "def __init__(self, nside, weights_map, lmax_factor=1.5, mask=None):\n self.params = sc.Parameters(nside, lmax_factor)\n self.weights_map = weights_map\n if mask is not None:\n self.mask = sc.Mask(mask, nside)\n else:\n self.mask = sc.Mask(np.ones(self.params.npix))", "def __init__(self, *args):\n _snap.TStrFltKd_swiginit(self, _snap.new_TStrFltKd(*args))", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def initialize(self): \r\n pass", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def __init__(self):\n\t\tself.clust_idx_part = -1\n\t\t\"\"\"\n\t\tthis list stores the indices of input trees having this taxon\n\t\t\"\"\"\n\t\tself.Support_Tree_List = []", "def initialize(self, **kwargs):", "def __init__(self, nsamples):\n super(ANsDiscovery, self).__init__()\n self.select_rate = cfg.ANs_select_rate\n self.ANs_size = cfg.ANs_size\n self.register_buffer('samples_num', torch.tensor(nsamples))\n self.register_buffer('anchor_indexes', torch.LongTensor([]))\n self.register_buffer('instance_indexes', torch.arange(nsamples).long())\n self.register_buffer('position', -1 * torch.arange(nsamples).long() - 1)\n self.register_buffer('neighbours', torch.LongTensor([]))\n 
self.register_buffer('entropy', torch.FloatTensor(nsamples))\n self.register_buffer('consistency', torch.tensor(0.0))", "def init(self, box: list, **kw):\n self.BOX = box\n\n if self.dataset_id == \"phy\":\n self.definition = \"Ifremer erddap Argo data fetcher for a space/time region\"\n elif self.dataset_id == \"ref\":\n self.definition = (\n \"Ifremer erddap Argo REFERENCE data fetcher for a space/time region\"\n )\n\n return self", "def __init__(self, *args):\n _snap.TNGraph_swiginit(self, _snap.new_TNGraph(*args))", "def __init__(self, plasma_parent):\n super(LevelNumberDensity, self).__init__(plasma_parent)\n self.calculate = self._calculate_dilute_lte\n self._update_inputs()\n self.initialize_indices = True", "def __init__(self, initial_guess=(), InitLineaire = False):\n\n # TODO: Add the capability to initialize using initial and final frame that linearly complete between\n self.first_node_init = list(initial_guess)\n self.init = list(initial_guess)\n self.last_node_init = list(initial_guess)\n self.InitLineaire = InitLineaire", "def __init__(self, *args):\n _snap.TDirNetNodeI_swiginit(self, _snap.new_TDirNetNodeI(*args))", "def __init__(self):\n super().__init__()\n self.location = 0.0\n self.scale = 1.0\n self.type = 'Laplace'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self):\r\n self.label = \"Geodesic Densification using arcpy\"\r\n self.description = \"Densifies geometries along geodesic lines\"\r\n self.canRunInBackground = False", "def __init__(self, *args):\n _snap.TDirNet_swiginit(self, _snap.new_TDirNet(*args))", "def init_weight(self):\n init_bn(self.norm0)" ]
[ "0.56644416", "0.5561239", "0.55297834", "0.54553896", "0.5450353", "0.5445349", "0.5430579", "0.5426074", "0.54253256", "0.5354728", "0.53483224", "0.5339778", "0.5335766", "0.5325919", "0.5325185", "0.5315453", "0.52921635", "0.52817094", "0.52803695", "0.5268682", "0.526729", "0.5260079", "0.5258252", "0.52578706", "0.525784", "0.5257382", "0.52543855", "0.5253188", "0.52404463", "0.5238171", "0.5225983", "0.5225285", "0.52226746", "0.52174944", "0.5215178", "0.5212149", "0.5206707", "0.5203155", "0.5201298", "0.5200306", "0.51989233", "0.519574", "0.5192599", "0.519109", "0.51882553", "0.5188026", "0.5182187", "0.5173868", "0.51735705", "0.5166932", "0.5166574", "0.5166139", "0.51591915", "0.51563716", "0.51448333", "0.51391494", "0.5130209", "0.5129199", "0.51278126", "0.51266944", "0.51257247", "0.5125394", "0.51184165", "0.5114751", "0.5107459", "0.51045775", "0.5103595", "0.51035273", "0.509663", "0.50923896", "0.50914073", "0.5091369", "0.5090376", "0.5090312", "0.5089024", "0.5084095", "0.50809246", "0.5073653", "0.5072752", "0.5070234", "0.5067159", "0.506076", "0.5059642", "0.5057582", "0.5052062", "0.50505424", "0.50505394", "0.50482094", "0.50451505", "0.5044304", "0.50439507", "0.50390214", "0.5039009", "0.50350386", "0.50341433", "0.50330144", "0.5032147", "0.5031543", "0.50311685", "0.5028955", "0.5026821" ]
0.0
-1
Given an input molecule, tries to generate its depictions.
def depict_molecule( self, het_id: str, mol: rdkit.Chem.rdchem.Mol ) -> DepictionResult: temp_mol = Chem.RWMol(mol) templateMol = Chem.RWMol(temp_mol).GetMol() pubchemMol = Chem.RWMol(temp_mol).GetMol() rdkitMol = Chem.RWMol(temp_mol).GetMol() results = [] pubchem_res = ( self._get_2D_by_pubchem(het_id, pubchemMol) if self.pubchem_templates else None ) template_res = self._get_2D_by_template(templateMol) if self.templates else [] rdkit_res = self._get_2D_by_rdkit(rdkitMol) if pubchem_res is not None: results.append(pubchem_res) if rdkit_res is not None: results.append(rdkit_res) results = results + template_res results.sort(key=lambda l: (l.score, l.source)) if results: to_return = results[0] fix_conformer(to_return.mol.GetConformer(0)) return to_return return DepictionResult( source=DepictionSource.Failed, template_name="", mol=None, score=1000 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sc2depictions(chromosome, root_name=\"output\", lot=0):\n mol_structure = sc2mol_structure(chromosome, lot=lot)\n mol2pdb(mol_structure, \"{0}.pdb\".format(root_name))\n mol2xyz(mol_structure, \"{0}.xyz\".format(root_name))\n Draw.MolToFile(mol_structure, \"{0}.png\".format(root_name))\n logger.info(\"Generated depictions with root name {0}\".format(root_name))", "def applyDemapping(self):\n pass", "def mol_structure2depictions(mol_structure, root_name=\"output\"):\n mol2pdb(mol_structure, \"{0}.pdb\".format(root_name))\n mol2xyz(mol_structure, \"{0}.xyz\".format(root_name))\n Draw.MolToFile(mol_structure, \"{0}.png\".format(root_name))", "def new_decomposition(self, verbose=False):\n V = self.degeneracy_matrix().kernel()\n p = next_prime_of_characteristic_coprime_to(F.ideal(1), self.level())\n T = self.hecke_matrix(p)\n D = T.decomposition_of_subspace(V)\n while len([X for X in D if not X[1]]) > 0:\n p = next_prime_of_characteristic_coprime_to(p, self.level())\n if verbose: print p.norm()\n T = self.hecke_matrix(p)\n D2 = []\n for X in D:\n if X[1]:\n D2.append(X)\n else:\n if verbose: print T.restrict(X[0]).fcp()\n for Z in T.decomposition_of_subspace(X[0]):\n D2.append(Z)\n D = D2\n D = [self.subspace(X[0]) for X in D]\n D.sort()\n S = Sequence(D, immutable=True, cr=True, universe=int, check=False)\n return S", "def _calc_del(dis):\n ndist = len(dis)\n deldis = np.zeros((ndist))\n deldis[0] = (dis[1] - dis[0]) * 0.5\n deldis[1:-1] = (dis[2:] - dis[0:-2]) * 0.5\n deldis[-1] = (dis[-1] - dis[-2]) * 0.5\n return deldis", "def find_degen(aln):\n\n codon_ind = find_aligned_codons(aln)\n aln2 = subalign(aln, codon_ind)\n\n pep_aln = mapalign(aln2, valfunc=seqlib.translate)\n pep = pep_aln.values()[0]\n identies = calc_conservation(pep_aln)\n\n degens = [-1] * aln.alignlen()\n\n for i in range(0, len(codon_ind), 3):\n if pep[i/3] == \"X\":\n continue\n degen = seqlib.AA_DEGEN[pep[i/3]]\n if identies[i/3] == 1.0:\n for j in range(3):\n degens[codon_ind[i+j]] = degen[j]\n\n return degens", "def diamond(T, Vg_start, Vg_end, Ng, Vd_start, Vd_end, Nd, Cs, Cd, Cg, Gs, Gd, num_e, mode='difcon', dVg=False, filename='simData.dat'):\n Vg = scipy.linspace(Vg_start, Vg_end, Ng)\n Vd = scipy.linspace(Vd_start, Vd_end, Nd)\n data_matrix = []\n for (i_vg, vg) in enumerate(Vg):\n myset=system(vg, Cs, Cd, Cg, Gs, Gd, num_e)\n myset.set_temperature(T)\n myset.pre_processing()\n I = []\n P = []\n V_dot = []\n print \"Vg = \", vg\n for vd in Vd:\n myset.tunnel_rate([0, vd, vg]) \n myset.solver() \n I.append(myset.current('drain','dot'))\n P.append(myset.proba('dot'))\n V_dot.append(myset.voltage('dot'))\n # convert lists to scipy arrays\n I = scipy.array(I)\n P = scipy.array(P)\n V_dot = scipy.array(V_dot)\n # compute the diffential conductance\n if mode == 'current':\n Y = Vd\n F = I\n elif mode == 'difcon':\n F, Y = derive(I, Vd)\n F *= 1e3\n elif mode == 'voltage':\n Y = Vd\n F = V_dot\n elif mode == 'francis':\n F_1, Y = derive(I, Vd)\n F_2, Y = derive(Vd-V_dot, Vd)\n F = F_1/F_2\n F *= 1e3\n elif mode == 'sourcis':\n F_1, Y = derive(I, Vd)\n F_2, Y = derive(V_dot, Vd)\n F = F_1/F_2\n F *= 1e3\n data_matrix.append(F)\n data_matrix = array(data_matrix)\n data_matrix = transpose(data_matrix)\n X = Vg\n \n # Derivate with Vg\n if dVg:\n data_dVg = []\n for vd_i in arange(len(Y)):\n F_dVg, X_dVg = derive(data_matrix[vd_i,:], X)\n F_dVg *= 1e3\n data_dVg.append(F_dVg)\n data_matrix = array(data_dVg)\n X = X_dVg\n \n if filename != 0: \n write_file(data_matrix, filename)\n return data_matrix, X, Y", "def 
decoherence(self,system):\n for i in range(self.n):\n for j in range(i,self.n):\n for item in self.decoherence_matrix[i][j]:\n tmp=Expolist([Expo(item[2],0)])\n t = int(self.index(item[0],item[1]))\n system[int(self.index(i,j))][t]+=tmp\n return system", "def generate(self, di):\n raise NotImplementedError", "def get_deep_distortions(defect_charges: dict, \r\n bdm_type: str='BDM',\r\n stol = 0.2,\r\n ):\r\n fancy_defects = {} #dict of defects undergoing deep distortions\r\n sm = StructureMatcher(ltol=0.2, stol=stol)\r\n for defect in defect_charges.keys():\r\n print(\"\\n\",defect)\r\n for charge in defect_charges[defect]:\r\n defect_name = \"{}_{}\".format(defect, str(charge)) #defect + \"_\" + str(charge)\r\n file_energies = \"{}{}/{}/{}.txt\".format(base_path, defect_name, bdm_type ,defect_name ) \r\n dict_energies, energy_diff, gs_distortion = sort_data(file_energies)\r\n \r\n if float(energy_diff) < -0.1 : #if a significant E drop occured , then store this fancy defect\r\n print(\"Deep distortion found for \", defect_name) \r\n if gs_distortion != \"rattle\":\r\n bdm_distortion = str(round(gs_distortion * 100, 1)) #change distortion format to the one used in file name (eg from 0.1 to 10.0)\r\n if bdm_distortion == \"0.0\":\r\n bdm_distortion = \"-0.0\"\r\n file_path=\"{}{}/{}/{}_{}%_BDM_Distortion/vasp_gam/CONTCAR\".format(base_path, defect_name, bdm_type ,defect_name, bdm_distortion) \r\n else:\r\n bdm_distortion = \"only_rattled\" # file naming format used for rattle\r\n file_path=\"{}{}/{}/{}_{}/vasp_gam/CONTCAR\".format(base_path, defect_name, bdm_type ,defect_name, bdm_distortion) \r\n try:\r\n gs_struct = grab_contcar(file_path) # get the final structure of the E lowering distortion\r\n if gs_struct == \"Not converged\":\r\n print(f\"Problem grabbing gs structure for {bdm_distortion} of {defect_name}\")\r\n except FileNotFoundError:\r\n print(\"NO CONTCAR for ground-state distortion\")\r\n break\r\n if defect in fancy_defects.keys(): #check if defect already in dict (other charge state lead to a lower E structure)\r\n \r\n gs_struct_in_dict = fancy_defects[defect][\"structure\"] \r\n \r\n if energy_diff < fancy_defects[defect][\"energy_diff\"]: #if E drop is greater (more negative), then update the dict with the lowest E distortion\r\n print(\"Charge {} lead to greatest E lowering distortion\".format(charge))\r\n fancy_defects[defect].update(\r\n {\"structure\": gs_struct, \"BDM_distortion\": gs_distortion,\"energy_diff\": energy_diff, \"charges\":[charge]}\r\n ) \r\n \r\n elif defect not in fancy_defects.keys(): # if defect not in dict, add it\r\n print(\"New defect! Adding {} with charge {} to dict\".format(defect, charge))\r\n fancy_defects[defect] = {\"charges\" : [charge], \"structure\": gs_struct, \"energy_diff\": energy_diff, \"BDM_distortion\": gs_distortion}\r\n \r\n #let's check that the gs structure wasn`t found already by BDM for the other charge states \r\n if defect in fancy_defects.keys(): # if the defect lead to an E lowering distortion\r\n for charge in defect_charges[defect]: # for all charge states of the defect\r\n if charge not in fancy_defects[defect][\"charges\"]: #if gs struct wasn't found already for that charge state\r\n defect_name = \"{}_{}\".format(defect, str(charge)) #defect + \"_\" + str(charge)\r\n gs_struct_in_dict = fancy_defects[defect][\"structure\"] \r\n if compare_gs_struct_to_BDM_structs( gs_struct_in_dict, defect_name, base_path, stol = stol ) : \r\n # structure found in BDM calcs for this charge state. 
Add it to the list to avoid redundant work\r\n fancy_defects[defect][\"charges\"].append(charge)\r\n #print(\"Ground-state structure found for {}_{} has been also found for the charge states: {}\".format(defect, fancy_defects[defect][\"charges\"][0], fancy_defects[defect][\"charges\"] ))\r\n return fancy_defects", "def elimination_technique_2(C):\n rels = C._reidemeister_relators\n rels.sort(reverse=True)\n gens = C._schreier_generators\n for i in range(len(gens) - 1, -1, -1):\n rel = rels[i]\n for j in range(len(gens) - 1, -1, -1):\n gen = gens[j]\n if rel.generator_count(gen) == 1:\n k = rel.exponent_sum(gen)\n gen_index = rel.index(gen**k)\n bk = rel.subword(gen_index + 1, len(rel))\n fw = rel.subword(0, gen_index)\n rep_by = (bk*fw)**(-1*k)\n del rels[i]; del gens[j]\n for l in range(len(rels)):\n rels[l] = rels[l].eliminate_word(gen, rep_by)\n break\n C._reidemeister_relators = rels\n C._schreier_generators = gens\n return C._schreier_generators, C._reidemeister_relators", "def _get_components(\n design_unit: oechem.OEDesignUnit\n ) -> Tuple[oechem.OEGraphMol(), oechem.OEGraphMol(), oechem.OEGraphMol()]:\n from openeye import oechem\n\n protein, solvent, ligand = oechem.OEGraphMol(), oechem.OEGraphMol(), oechem.OEGraphMol()\n\n logging.debug(\"Extracting molecular components ...\")\n design_unit.GetProtein(protein)\n design_unit.GetSolvent(solvent)\n design_unit.GetLigand(ligand)\n\n # delete protein atoms with no name (found in prepared protein of 4ll0)\n for atom in protein.GetAtoms():\n if not atom.GetName().strip():\n logging.debug(\"Deleting unknown atom ...\")\n protein.DeleteAtom(atom)\n\n # perceive residues to remove artifacts of other design units in the sequence of the protein\n # preserve certain properties to assure correct behavior of the pipeline,\n # e.g. 
deletion of chains in OEKLIFSKinaseApoFeaturizer._process_kinase_domain method\n preserved_info = (\n oechem.OEPreserveResInfo_ResidueNumber\n | oechem.OEPreserveResInfo_ResidueName\n | oechem.OEPreserveResInfo_AtomName\n | oechem.OEPreserveResInfo_ChainID\n | oechem.OEPreserveResInfo_HetAtom\n | oechem.OEPreserveResInfo_InsertCode\n | oechem.OEPreserveResInfo_AlternateLocation\n )\n oechem.OEPerceiveResidues(protein, preserved_info)\n oechem.OEPerceiveResidues(solvent, preserved_info)\n oechem.OEPerceiveResidues(ligand)\n\n logging.debug(\n \"Number of component atoms: \" +\n f\"Protein - {protein.NumAtoms()}, \" +\n f\"Solvent - {solvent.NumAtoms()}, \" +\n f\"Ligand - {ligand.NumAtoms()}.\"\n )\n return protein, solvent, ligand", "def _partition_D(model):\n\n D1_indices = [] # A list of the indices for the unknown nodal displacements\n D2_indices = [] # A list of the indices for the known nodal displacements\n D2 = [] # A list of the values of the known nodal displacements (D != None)\n\n # Create the auxiliary table\n for node in model.Nodes.values():\n \n # Unknown displacement DX\n if node.support_DX==False and node.EnforcedDX == None:\n D1_indices.append(node.ID*6 + 0)\n # Known displacement DX\n elif node.EnforcedDX != None:\n D2_indices.append(node.ID*6 + 0)\n D2.append(node.EnforcedDX)\n # Support at DX\n else:\n D2_indices.append(node.ID*6 + 0)\n D2.append(0.0)\n\n # Unknown displacement DY\n if node.support_DY == False and node.EnforcedDY == None:\n D1_indices.append(node.ID*6 + 1)\n # Known displacement DY\n elif node.EnforcedDY != None:\n D2_indices.append(node.ID*6 + 1)\n D2.append(node.EnforcedDY)\n # Support at DY\n else:\n D2_indices.append(node.ID*6 + 1)\n D2.append(0.0)\n\n # Unknown displacement DZ\n if node.support_DZ == False and node.EnforcedDZ == None:\n D1_indices.append(node.ID*6 + 2)\n # Known displacement DZ\n elif node.EnforcedDZ != None:\n D2_indices.append(node.ID*6 + 2)\n D2.append(node.EnforcedDZ)\n # Support at DZ\n else:\n D2_indices.append(node.ID*6 + 2)\n D2.append(0.0)\n\n # Unknown displacement RX\n if node.support_RX == False and node.EnforcedRX == None:\n D1_indices.append(node.ID*6 + 3)\n # Known displacement RX\n elif node.EnforcedRX != None:\n D2_indices.append(node.ID*6 + 3)\n D2.append(node.EnforcedRX)\n # Support at RX\n else:\n D2_indices.append(node.ID*6 + 3)\n D2.append(0.0)\n\n # Unknown displacement RY\n if node.support_RY == False and node.EnforcedRY == None:\n D1_indices.append(node.ID*6 + 4)\n # Known displacement RY\n elif node.EnforcedRY != None:\n D2_indices.append(node.ID*6 + 4)\n D2.append(node.EnforcedRY)\n # Support at RY\n else:\n D2_indices.append(node.ID*6 + 4)\n D2.append(0.0)\n\n # Unknown displacement RZ\n if node.support_RZ == False and node.EnforcedRZ == None:\n D1_indices.append(node.ID*6 + 5)\n # Known displacement RZ\n elif node.EnforcedRZ != None:\n D2_indices.append(node.ID*6 + 5)\n D2.append(node.EnforcedRZ)\n # Support at RZ\n else:\n D2_indices.append(node.ID*6 + 5)\n D2.append(0.0)\n \n # Legacy code on the next line. 
I will leave it here until the line that follows has been proven over time.\n # D2 = atleast_2d(D2)\n \n # Convert D2 from a list to a matrix\n D2 = array(D2, ndmin=2).T\n\n # Return the indices and the known displacements\n return D1_indices, D2_indices, D2", "def discriminant(f):\n return f.per(dmp_discriminant(f.rep, f.lev, f.dom), lower=True)", "def _decompose(self, reg):\n raise NotImplementedError('No decomposition available: {}'.format(self))", "def run(self, dag):\n # Initiate the commutation set\n self.property_set['commutation_set'] = defaultdict(list)\n\n # Build a dictionary to keep track of the gates on each qubit\n # The key with format (wire_name) will store the lists of commutation sets\n # The key with format (node, wire_name) will store the index of the commutation set\n # on the wire with wire_name, thus, for example:\n # self.property_set['commutation_set'][wire_name][(node, wire_name)] will give the\n # commutation set that contains node.\n\n for wire in dag.wires:\n wire_name = \"{0}[{1}]\".format(str(wire.register.name), str(wire.index))\n self.property_set['commutation_set'][wire_name] = []\n\n # Add edges to the dictionary for each qubit\n for node in dag.topological_op_nodes():\n for (_, _, edge_data) in dag.edges(node):\n\n edge_name = edge_data['name']\n self.property_set['commutation_set'][(node, edge_name)] = -1\n\n # Construct the commutation set\n for wire in dag.wires:\n wire_name = \"{0}[{1}]\".format(str(wire.register.name), str(wire.index))\n\n for current_gate in dag.nodes_on_wire(wire):\n\n current_comm_set = self.property_set['commutation_set'][wire_name]\n if not current_comm_set:\n current_comm_set.append([current_gate])\n\n if current_gate not in current_comm_set[-1]:\n prev_gate = current_comm_set[-1][-1]\n does_commute = False\n try:\n does_commute = _commute(current_gate, prev_gate, self.cache)\n except TranspilerError:\n pass\n if does_commute:\n current_comm_set[-1].append(current_gate)\n\n else:\n current_comm_set.append([current_gate])\n\n temp_len = len(current_comm_set)\n self.property_set['commutation_set'][(current_gate, wire_name)] = temp_len - 1", "def import_deep_distortion_by_type(defect_list: list,\r\n fancy_defects: dict,\r\n ):\r\n list_deep_distortion = []\r\n for i in defect_list: \r\n if i['name'] in fancy_defects.keys(): # if defect underwent a deep distortion\r\n defect_name = i['name']\r\n print(defect_name)\r\n i['supercell']['structure'] = fancy_defects[defect_name]['structure'] #structure of E lowering distortion\r\n print(\"Using the distortion found for charge state(s) {} with BDM distortion {}\".format(fancy_defects[defect_name]['charges'],\r\n fancy_defects[defect_name][\"BDM_distortion\"] ) )\r\n #remove the charge state of the E lowering distortion distortion\r\n if len(fancy_defects[i['name']]['charges']) > 1:\r\n print(\"Intial charge states of defect:\", i['charges'], \"Will remove the ones where the distortion was found...\")\r\n [i['charges'].remove(charge) for charge in fancy_defects[defect_name]['charges']]\r\n print(\"Trying distortion for charge states:\", i['charges'])\r\n else: \r\n i['charges'].remove(fancy_defects[defect_name]['charges'][0])\r\n if i['charges']: #if list of charges to try deep distortion not empty, then add defect to the list\r\n list_deep_distortion.append(i)\r\n return list_deep_distortion", "def combination_dempster(self, *mass_functions):\n #Define the combination for only two mass functions:\n def combination_two(m1, m2):\n combination = m1.combination_smets(m2)\n 
combination.focals.pop(next(iter(combination)).get_compatible_empty_element(), None)\n combination.clean()\n combination.normalise()\n return combination\n\n #Combine all the mass functions:\n combination = combination_two(self, mass_functions[0])\n for mass_function in mass_functions[1:]:\n combination = combination_two(combination, mass_function)\n return combination", "def _make_deductions(self):\n \n n = self.n\n progress = True\n all_solved = True\n while progress:\n progress = False\n for y in range(2 * n - 1):\n for x in range(len(self.arr[y])):\n if self.arr[y][x] == '.':\n all_solved = False\n new_candidates = []\n for c in self.candidates[y][x]:\n self.arr[y][x] = c\n fits = self._check(x, y)\n self.arr[y][x] = '.'\n if fits:\n new_candidates.append(c)\n\n self.candidates[y][x] = new_candidates\n if len(new_candidates) == 1:\n progress = True\n self.arr[y][x] = new_candidates[0]\n\n return all_solved", "def get_dems(src_dir):\n \n method_dir, pair_dir = os.path.split(src_dir)\n _, method = os.path.split(method_dir)\n \n method_patterns = {'pairs': ('*_dem.tif',),\n 'pc_align_reg': ('*_dem.tif', '*DEM.tif'),\n 'icesat_reg': ('*dem_reg.tif',),\n 'nuth_reg': ('*dem.tif', '*dem_trans.tif')}\n \n # Get all DEMs in srcdir (should be two)\n dems = []\n for pattern in method_patterns[method]:\n dems_pattern = os.path.join(src_dir, pattern)\n dems.extend(glob.glob(dems_pattern))\n \n return dems", "def decompress(input_dir, dcm_pattern='*.dcm'):\n dcmfiles = sorted(recursive_glob(input_dir, dcm_pattern))\n for dcm in dcmfiles:\n cmd = 'gdcmconv --raw -i \"{0}\" -o \"{0}\"'.format(dcm)\n log.debug('Calling {}.'.format(cmd))\n subprocess.check_call(cmd, shell=True)", "def main():\n\n\toptions = parse_arguments()\n\tcodon_counts = parse.codon_freq_table(options.codon)\n\tgenetic_code = parse.genetic_code(options.codon_table, options.gene_code)\n\n\tdc = degenerate_codons(genetic_code=genetic_code,codon_counts=codon_counts)\n\tdc.compute_results()\n\tdc.output(options.output_format)", "def get_degen(self):\n nspin, nkpt, nband = self.EIG.shape\n \n degen = list()\n for ikpt in range(nkpt):\n \n kpt_degen = list()\n group = list()\n last_ispin, last_iband, last_eig = 0, 0, -float('inf')\n \n for sbe in self.iter_spin_band_eig(ikpt):\n ispin, iband, eig = sbe\n \n if np.isclose(last_eig, eig, rtol=1e-12, atol=1e-5):\n if not group:\n group.append((last_ispin, last_iband))\n group.append((ispin, iband))\n \n else:\n if group:\n kpt_degen.append(group)\n group = list()\n \n last_ispin, last_iband, last_eig = ispin, iband, eig\n \n degen.append(kpt_degen)\n\n self.degen = degen\n \n return degen", "def deconvolute(args):\n prism.deconvolute.run(\n input_fps=args.input,\n output_fp=args.output,\n full_pattern_proportion=args.full_pattern_proportion,\n merge_cutoff=args.merge_cutoff,\n outlier_dispersion_cutoff=args.outlier_dispersion_cutoff,\n intersection_method=args.intersection_method,\n copynumber=args.copynumber,\n cn_prior=args.cn_prior,\n num_max_cluster=args.num_max_cluster,\n seed=args.seed,\n verbose=args.verbose,\n )", "def get_des_gen(tree_before: Node, tree_after: Node) -> Iterator[Description]:\n distance, delta = diff(tree_before, tree_after)\n\n tree_before_iter: Iterator[Node] = infinite_none_suffix(\n post_order_tree_traversal(tree_before)\n )\n tree_after_iter: Iterator[Node] = infinite_none_suffix(\n post_order_tree_traversal(tree_after)\n )\n\n cur_node_before: Node = next(tree_before_iter)\n cur_node_after: Node = next(tree_after_iter)\n\n while cur_node_before or 
cur_node_after:\n while cur_node_after in delta['inserted']:\n yield describe.describe_insertion(cur_node_after)\n cur_node_after = next(tree_after_iter)\n while cur_node_before in delta['deleted']:\n yield describe.describe_deletion(cur_node_before)\n cur_node_before = next(tree_before_iter)\n if cur_node_before in delta['stayed']['before']:\n yield describe.describe_stayed(cur_node_before, cur_node_after)\n cur_node_before = next(tree_before_iter)\n cur_node_after = next(tree_after_iter)\n return", "def cmd_dele(args):", "def makeDPartial( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n _Kcuts1 = \"~ISMUON & (PT > %(DaugPtLoose)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2Loose)s)\" % locals()['config']\n _KcutsPIDK = \" & (PIDK > %(HighPIDK)s)\" % locals()['config']\n _Kcuts2 = \" & (ISLONG) & (P > %(DaugPLoose)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2Loose)s)\" % locals()['config']\n _Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2\n _Picuts1 = \"~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)\" % locals()['config']\n _PicutsPIDK = \" & (PIDK < %(LowPIDK)s)\" % locals()['config']\n _Picuts2 = \" & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)\" % locals()['config']\n _Picuts = _Picuts1 + _PicutsPIDK + _Picuts2\n _dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts }\n #_Kcuts1 = \"~ISMUON & (PT > 500* MeV) & (MIPCHI2DV(PRIMARY) > 4)\"\n #_KcutsPIDK = \" & (PIDK > 5)\"\n #_Kcuts2 = \" & (ISLONG) & (P > 5000* MeV) & (TRCHI2DOF < 5)\"\n #_Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2\n #_Picuts1 = \"~ISMUON & (PT > 500* MeV) & (MIPCHI2DV(PRIMARY) > 4)\"\n #_PicutsPIDK = \" & (PIDK < 0)\"\n #_Picuts2 = \" & (ISLONG) & (P > 5000* MeV) & (TRCHI2DOF < 5)\"\n #_Picuts = _Picuts1 + _PicutsPIDK + _Picuts2\n #_dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts }\n\n _combCuts = \"(APT > %(D0PtLoose)s* MeV)\" \\\n \"& (AP > %(D0P)s* MeV)\" % locals()['config']\n\n _motherCuts = \"(VFASPF(VCHI2PDOF) < %(D0VtxChi2Ndof)s)\" \\\n \"& (BPVVDCHI2 > %(D0FDChi2)s)\" % locals()['config']\n\n\n _Dminus = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = _dauCuts\n , CombinationCut = _combCuts\n , MotherCut = _motherCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dminus,\n RequiredSelections = inputSel\n )", "def morphological_decomposition(self, string: str):\n self.morph_analyzer.set_input_symbols(self.morph_analyzer.input_symbols())\n self.morph_analyzer.set_output_symbols(self.morph_analyzer.output_symbols())\n self.morph_analyzer.set_start(self.morph_analyzer.start())\n self.morph_analyzer.set_final(self.morph_analyzer.final())\n self.morph_analyzer.set_properties(fst.Fst.EXPANDED, True)\n self.morph_analyzer.set_properties(fst.Fst.ACCEPTOR, True)\n \n words = string.split()\n morph_decomposition = []\n for word in words:\n self.morph_analyzer.set_input_str(word)\n self.morph_analyzer.set_output_str(word)\n self.morph_analyzer.compose(self.morph_analyzer)\n morph_decomp = []\n for path in self.morph_analyzer.paths():\n morph_decomp.append((path.input_str(), path.output_str()))\n morph_decomposition.append(morph_decomp)\n return morph_decomposition", "def getDihedrals(self):\n uniqKpList = self.getFlagData('DIHEDRAL_FORCE_CONSTANT')\n uniqPeriodList = self.getFlagData('DIHEDRAL_PERIODICITY')\n uniqPhaseList = self.getFlagData('DIHEDRAL_PHASE')\n # for list below, true atom number = abs(index)/3 + 1\n dihCodeHList = self.getFlagData('DIHEDRALS_INC_HYDROGEN')\n dihCodeNonHList = self.getFlagData('DIHEDRALS_WITHOUT_HYDROGEN')\n dihCodeList = dihCodeHList + 
dihCodeNonHList\n properDih = []\n improperDih = []\n condProperDih = [] # list of dihedrals condensed by the same quartet\n #atomPairs = []\n atomPairs = set()\n for i in xrange(0, len(dihCodeList), 5):\n idAtom1 = dihCodeList[i] / 3 # remember python starts with id 0\n idAtom2 = dihCodeList[i+1] / 3\n # 3 and 4 indexes can be negative: if id3 < 0, end group interations\n # in amber are to be ignored; if id4 < 0, dihedral is improper\n idAtom3raw = dihCodeList[i+2] / 3 # can be negative -> exclude from 1-4vdw\n idAtom4raw = dihCodeList[i+3] / 3 # can be negative -> Improper\n idAtom3 = abs(idAtom3raw)\n idAtom4 = abs(idAtom4raw)\n dihTypeId = dihCodeList[i+4] - 1\n atom1 = self.atoms[idAtom1]\n atom2 = self.atoms[idAtom2]\n atom3 = self.atoms[idAtom3]\n atom4 = self.atoms[idAtom4]\n kPhi = uniqKpList[dihTypeId] # already divided by IDIVF\n period = int(uniqPeriodList[dihTypeId]) # integer\n phase = uniqPhaseList[dihTypeId]# angle given in rad in prmtop\n atoms = [atom1, atom2, atom3, atom4]\n dihedral = Dihedral(atoms, kPhi, period, phase)\n if idAtom4raw > 0:\n try: atomsPrev = properDih[-1].atoms\n except: atomsPrev = []\n properDih.append(dihedral)\n if idAtom3raw < 0 and atomsPrev == atoms:\n condProperDih[-1].append(dihedral)\n else:\n condProperDih.append([dihedral])\n pair = (atom1, atom4)\n #if atomPairs.count(pair) == 0 and idAtom3raw > 0:\n if idAtom3raw > 0:\n atomPairs.add(pair)\n else:\n improperDih.append(dihedral)\n try: atomPairs = sorted(atomPairs)\n except: pass\n self.properDihedrals = properDih\n self.improperDihedrals = improperDih\n self.condensedProperDihedrals = condProperDih # [[],[],...]\n self.atomPairs = atomPairs # set((atom1, atom2), ...)\n self.printDebug(\"getDihedrals done\")", "def decomposition(*params, wires):\n raise NotImplementedError", "def dedisperse(self, dm, gulp=10000, **kwargs):\n chan_delays = self.header.getDMdelays(dm)\n max_delay = int(chan_delays.max())\n gulp = max(2 * max_delay, gulp)\n tim_len = self.header.nsamples - max_delay\n tim_ar = np.zeros(tim_len, dtype=\"float32\")\n for nsamps, ii, data in self.readPlan(gulp, skipback=max_delay, **kwargs):\n lib.dedisperse(\n data,\n tim_ar,\n chan_delays,\n max_delay,\n self.header.nchans,\n nsamps,\n ii * (gulp - max_delay),\n )\n return TimeSeries(tim_ar, self.header.newHeader({\"nchans\": 1, \"refdm\": dm}))", "def discrete_molecules(system, rebuild=None, tol=0.4):\n # First we check which operation mode we use.\n # 1) Non-periodic MolecularSystem.\n # 2) Periodic MolecularSystem without rebuilding.\n # 3) Periodic Molecular system with rebuilding (supercell provided).\n if rebuild is not None:\n mode = 3\n else:\n if 'unit_cell' in system.keys():\n if system['unit_cell'].shape == (6,):\n mode = 2\n else:\n mode = 1\n elif 'lattice' in system.keys():\n if system['lattice'].shape == (3, 3):\n mode = 2\n else:\n mode = 1\n else:\n mode = 1\n # We create a list containing all atoms, theirs periodic elements and\n # coordinates. As this process is quite complicated, we need a list\n # which we will gradually be reducing.\n try:\n elements = system['elements']\n coordinates = system['coordinates']\n except KeyError:\n raise _FunctionError(\n \"The 'elements' key is missing in the 'system' dictionary \"\n \"attribute of the MolecularSystem object. 
Which means, you need to\"\n \" decipher the forcefield based atom keys first (see manual).\"\n )\n coordinates = system['coordinates']\n args = (elements, coordinates)\n adj = 0\n # If there are forcefield 'atom ids' as well we will retain them.\n if 'atom_ids' in system.keys():\n atom_ids = system['atom_ids']\n args = (elements, atom_ids, coordinates)\n adj = 1\n atom_list = compose_atom_list(*args)\n atom_coor = decompose_atom_list(atom_list)[1 + adj]\n # Scenario 1: We load a non-periodic MolecularSystem.\n # We will not have 'unit_cell' nor 'lattice' keywords in the dictionary\n # and also we do not do any re-building.\n # Scenario 2: We load a periodic MolecularSystem. We want to only Extract\n # complete molecules that do not have been affected by the periodic\n # boundary.\n # Scenario 3: We load a periodic Molecular System. We want it to be rebuild\n # therefore, we also provide a supercell.\n # Scenarios 2 and 3 require a lattice and also their origin is at origin.\n # Scenario 1 should have the origin at the center of mass of the system.\n # EDIT 09-04-18: All origins/pseudo_origin had to be skewed towards some\n # direction (x + 0.01) so that there would be no ambiguity in periodic\n # ang highly symmetric systems where the choice of the closest atom would\n # be random from a set of equally far choices - bug found in the testing\n # this way rebuild system should always look the same from the same input\n # and on different machines.\n if mode == 2 or mode == 3:\n # Scenarios 2 or 3.\n origin = np.array([0.01, 0., 0.])\n if 'lattice' not in system.keys():\n matrix = unit_cell_to_lattice_array(system['unit_cell'])\n else:\n matrix = system['lattice']\n pseudo_origin_frac = np.array([0.26, 0.25, 0.25])\n pseudo_origin = cartisian_from_fractional(pseudo_origin_frac, matrix)\n # If a supercell is also provided that encloses the unit cell for the\n # reconstruction of the molecules through the periodic boundary.\n if rebuild is not None:\n selements = rebuild['elements']\n sids = rebuild['atom_ids']\n scoordinates = rebuild['coordinates']\n satom_list = compose_atom_list(selements, sids, scoordinates)\n satom_coor = decompose_atom_list(satom_list)[1 + adj]\n # There is one more step. We need to sort out for all the\n # reconstructed molecules, which are the ones that belong to the\n # unit cell. As we did the reconstruction to every chunk in the unit\n # cell we have now some molecules that belong to neighbouring cells.\n # The screening is simple. If the COM of a molecule translated to\n # fractional coordinates (so that it works for parallelpiped) is\n # within the unit cell boundaries <0, 1> then it's it. There is\n # an exception, for the trajectories, very often the unit cell\n # is centered at origin. Therefore we need to use <-0.5, 0.5>\n # boundary. We will simply decide which is the case by calculating\n # the centre of mass of the whole system.\n system_com = center_of_mass(elements, coordinates)\n if np.allclose(system_com, origin, atol=1e-00):\n boundary = np.array([-0.5, 0.5])\n else:\n boundary = np.array([0., 1.])\n else:\n # Scenario 1.\n pseudo_origin = center_of_mass(\n elements, coordinates) + np.array([0.01, 0., 0.])\n # Here the final discrete molecules will be stored.\n molecules = []\n # Exceptions. 
Usually end-point atoms that create single bonds or\n # just a separate atoms in the system.\n exceptions = ['H', 'CL', 'BR', 'F', 'HE', 'AR', 'NE', 'KR', 'XE', 'RN']\n # The upper limit for distances analysed for bonds will be assigned for\n # a given system (to save time). We take set('elements') and then find\n # the largest R(cov) in the system and set the max_dist as a double\n # of it plus the 150% tolerance (tol).\n set_of_elements = set(system['elements'])\n max_r_cov = max([\n atomic_covalent_radius[i.upper()] for i in set_of_elements])\n max_dist = 2 * max_r_cov + tol\n # We continue untill all items in the list have been analysed and popped.\n while atom_list:\n inside_atoms_heavy = [\n i for i in atom_list if i[0].upper() not in exceptions\n ]\n if inside_atoms_heavy:\n # Now we create an array of atom coordinates. It does seem\n # somehow counter-intuitive as this is what we started with\n # and made it into a list. But, in my opinion it's the only\n # way to do it. It's hard to control and delete items in two\n # separate arrays that we started with and we don't want\n # atoms already assigned in our array for distance matrix.\n inside_atoms_coord_heavy = decompose_atom_list(inside_atoms_heavy)[\n 1 + adj]\n dist_matrix = euclidean_distances(inside_atoms_coord_heavy,\n pseudo_origin.reshape(1, -1))\n atom_index_x, _ = np.unravel_index(dist_matrix.argmin(),\n dist_matrix.shape)\n # Added this so that lone atoms (even if heavy) close to the\n # periodic boundary are not analysed, as they surely have matching\n # symmetry equivalence that bind to a bigger atom cluster inside\n # the unit_cell.\n potential_starting_point = inside_atoms_heavy[atom_index_x]\n pot_arr = np.array(potential_starting_point[1 + adj:])\n dist_matrix = euclidean_distances(\n atom_coor, pot_arr.reshape(1, -1)\n )\n idx = (dist_matrix > 0.1) * (dist_matrix < max_dist)\n if len(idx) < 1:\n pass\n else:\n working_list = [potential_starting_point]\n else:\n # Safety check.\n break\n final_molecule = []\n while working_list:\n working_list_temp = []\n try:\n atom_coor = decompose_atom_list(atom_list)[1 + adj]\n except _FunctionError:\n atom_coor = None\n for i in working_list:\n if i[0].upper() not in exceptions:\n # It's of GREATEST importance that the i_arr variable\n # is assigned here before entering the atom_coor loop.!\n # Otherwise it will not be re-asigned when the satom_list\n # still iterates, but the atom_list is already empty...\n i_arr = np.array(i[1 + adj:])\n if atom_coor is not None:\n dist_matrix = euclidean_distances(\n atom_coor, i_arr.reshape(1, -1)\n )\n idx = (dist_matrix > 0.1) * (dist_matrix < max_dist)\n neighbours_indexes = np.where(idx)[0]\n for j in neighbours_indexes:\n j_arr = np.array(atom_coor[j])\n r_i_j = distance(i_arr, j_arr)\n r_cov_i_j = atomic_covalent_radius[\n i[0].upper()] + atomic_covalent_radius[\n atom_list[j][0].upper()]\n if r_cov_i_j - tol < r_i_j < r_cov_i_j + tol:\n working_list_temp.append(atom_list[j])\n if rebuild is not None:\n sdist_matrix = euclidean_distances(\n satom_coor, i_arr.reshape(1, -1))\n sidx = (sdist_matrix > 0.1) * (sdist_matrix < max_dist)\n sneighbours_indexes = np.where(sidx)[0]\n for j in sneighbours_indexes:\n if satom_list[j] in atom_list:\n pass\n else:\n j_arr = np.array(satom_coor[j])\n r_i_j = distance(i_arr, j_arr)\n r_cov_i_j = atomic_covalent_radius[\n i[0].upper()\n ] + atomic_covalent_radius[\n satom_list[j][0].upper()]\n if r_cov_i_j - tol < r_i_j < r_cov_i_j + tol:\n working_list_temp.append(satom_list[j])\n 
final_molecule.append(i)\n else:\n final_molecule.append(i)\n for i in working_list:\n try:\n atom_list.remove(i)\n except ValueError:\n pass\n # We empty the working list as all the items were analysed\n # and moved to the final_molecule list.\n working_list = []\n # We make sure there are no duplicates in the working_list_temp.\n working_list_temp = unique(working_list_temp)\n # Now we move the entries from the temporary working list\n # to the working list for looping analysys.\n for i in working_list_temp:\n # We make sure that only new and unassigned atoms are\n # being transfered.\n if i not in final_molecule:\n working_list.append(i)\n final_molecule_dict = {}\n final_molecule_dict['elements'] = np.array(\n [x[0] for x in final_molecule], dtype='str')\n final_molecule_dict['coordinates'] = np.array(\n [[*xyz[1 + adj:]] for xyz in final_molecule])\n if adj == 1:\n final_molecule_dict['atom_ids'] = np.array(\n [x[1] for x in final_molecule], dtype='str')\n # In general we always want the molecule so the initial bool_ is True.\n bool_ = True\n # But, for periodic only if the molecule is in the initial unit cell.\n if rebuild is not None:\n com = center_of_mass(final_molecule_dict['elements'],\n final_molecule_dict['coordinates'])\n com_frac = fractional_from_cartesian(com, matrix)[0]\n # If we don't round the numerical errors will come up.\n com_frac_round = np.around(com_frac, decimals=8)\n bool_ = np.all(np.logical_and(com_frac_round >= boundary[0],\n com_frac_round < boundary[1]),\n axis=0)\n if bool(bool_) is True:\n molecules.append(final_molecule_dict)\n return molecules", "def testDipoleEdge(self):\n\n sources = DipoleFitTaskTest.runDetection(self)\n\n for i, r1 in enumerate(sources):\n result = r1.extract(\"ip_diffim_DipoleFit*\")\n self.assertTrue(result.get(\"ip_diffim_DipoleFit_flag\"))", "def get_des(tree_before: Node, tree_after: Node) -> List[Description]:\n\n des_gen: Iterator[Description] = get_des_gen(tree_before, tree_after)\n\n return list(des_gen)", "def calc_dihedrals(points):\n\tpiter = iter(points)\n\n\tcalculator = dihedral_calculator()\n\tcalculator.send(None)\n\n\tfor i in range(3):\n\t\tcalculator.send(next(piter))\n\n\tfor point in piter:\n\t\tyield calculator.send(point)", "def decompose(self, reg):\n seq = self._decompose(reg)\n if self.dagger:\n # apply daggers, reverse the Command sequence\n for cmd in seq:\n cmd.op.dagger = not cmd.op.dagger\n seq = list(reversed(seq))\n return seq", "def pr2_motion_designators(desig):\n solutions = []\n\n # Type: moving\n if desig.check_constraints([('type', 'moving'), 'target']):\n if desig.check_constraints(['orientation']):\n solutions.append(desig.make_dictionary([('cmd', 'navigate'), 'target', 'orientation']))\n solutions.append(desig.make_dictionary([('cmd', 'navigate'), 'target', ('orientation', BulletWorld.robot.get_orientation())]))\n\n # Type: pick-up\n if desig.check_constraints([('type', 'pick-up'), 'object']):\n if desig.check_constraints([('arm', 'right')]):\n solutions.append(desig.make_dictionary([('cmd', 'pick'), 'object', ('gripper', robot_description.get_tool_frame('right'))]))\n solutions.append(desig.make_dictionary([('cmd', 'pick'), 'object', ('gripper', robot_description.get_tool_frame('left'))]))\n\n # Type: place\n if desig.check_constraints([('type', 'place'), 'target']):\n if desig.check_constraints(['object']):\n if desig.check_constraints([('arm', 'right')]):\n solutions.append(desig.make_dictionary([('cmd', 'place'), 'target', 'object', ('gripper', 
robot_description.get_tool_frame('right'))]))\n solutions.append(desig.make_dictionary([('cmd', 'place'), 'target', 'object', ('gripper', robot_description.get_tool_frame('left'))]))\n\n # Type: opening\n if desig.check_constraints([('type', 'opening-prismatic'), 'joint', 'handle', 'part-of']):\n if desig.check_constraints([('arm', 'right')]):\n if desig.check_constraints(['distance']):\n solutions.append(desig.make_dictionary(\n [('cmd', 'open-prismatic'), 'joint', 'handle', ('gripper', robot_description.get_tool_frame('right')),\n 'distance', 'part-of']))\n else:\n solutions.append(desig.make_dictionary(\n [('cmd', 'open-prismatic'), 'joint', 'handle', ('gripper', robot_description.get_tool_frame('right')),\n ('distance', 0.3), 'part-of']))\n else:\n if desig.check_constraints(['distance']):\n solutions.append(desig.make_dictionary(\n [('cmd', 'open-prismatic'), 'joint', 'handle', ('gripper', robot_description.get_tool_frame('left')),\n 'distance', 'part-of']))\n else:\n solutions.append(desig.make_dictionary(\n [('cmd', 'open-prismatic'), 'joint', 'handle', ('gripper', robot_description.get_tool_frame('left')),\n ('distance', 0.3), 'part-of']))\n\n # Type: closing\n if desig.check_constraints([('type', 'closing-prismatic'), 'joint', 'handle', 'part-of']):\n if desig.check_constraints([('arm', 'right')]):\n if desig.check_constraints(['distance']):\n solutions.append(desig.make_dictionary(\n [('cmd', 'close-prismatic'), 'joint', 'handle', ('gripper', robot_description.get_tool_frame('right')),\n 'distance', 'part-of']))\n else:\n solutions.append(desig.make_dictionary(\n [('cmd', 'close-prismatic'), 'joint', 'handle', ('gripper', robot_description.get_tool_frame('right')),\n ('distance', 0.3), 'part-of']))\n else:\n solutions.append(desig.make_dictionary(\n [('cmd', 'close-prismatic'), 'joint', 'handle', 'part-of', ('distance', 0.3),\n ('gripper', robot_description.get_tool_frame('left')), 'part-of']))\n\n # Type: open fridge\n if desig.check_constraints([('type', 'opening-rotational'), 'joint', 'handle', 'part-of']):\n if desig.check_constraints([('arm', 'right')]):\n gripper = robot_description.get_tool_frame('right')\n else:\n gripper = robot_description.get_tool_frame('left')\n if desig.check_constraints(['distance']):\n solutions.append(desig.make_dictionary(\n [('cmd', 'open-rotational'), 'joint', 'handle', ('gripper', gripper), 'distance', 'part-of']\n ))\n else:\n solutions.append(desig.make_dictionary(\n [('cmd', 'open-rotational'), 'joint', 'handle', ('gripper', gripper), ('distance', 1), 'part-of']\n ))\n\n # Type: close fridge\n if desig.check_constraints([('type', 'closing-rotational'), 'joint', 'handle', 'part-of']):\n if desig.check_constraints([('arm', 'right')]):\n gripper = robot_description.get_tool_frame('right')\n else:\n gripper = robot_description.get_tool_frame('left')\n solutions.append(desig.make_dictionary(\n [('cmd', 'close-rotational'), 'joint', 'handle', ('gripper', gripper), 'part-of']\n ))\n\n # Type: move-tcp\n if desig.check_constraints([('type', 'move-tcp'), 'target']):\n if desig.check_constraints([('arm', 'right')]):\n solutions.append(desig.make_dictionary([('cmd', 'move-tcp'), 'target', ('gripper', robot_description.get_tool_frame('right'))]))\n solutions.append(desig.make_dictionary([('cmd', 'move-tcp'), 'target', ('gripper', robot_description.get_tool_frame('left'))]))\n\n # Type: park-arms\n if desig.check_constraints([('type', 'park-arms')]):\n solutions.append(desig.make_dictionary([('cmd', 'park')]))\n\n # Type: looking\n if 
desig.check_constraints([('type', 'looking')]):\n if desig.check_constraints(['target']):\n solutions.append(desig.make_dictionary([('cmd', 'looking'), 'target']))\n if desig.check_constraints(['object']):\n solutions.append(desig.make_dictionary([('cmd', 'looking'), ('target', BulletWorld.current_bullet_world.\n get_objects_by_name(desig.prop_value('object')).get_pose())]))\n\n # Type: opening-gripper\n if desig.check_constraints([('type', 'opening-gripper'), 'gripper']):\n solutions.append(desig.make_dictionary([('cmd', 'move-gripper'), ('motion', 'open'), 'gripper']))\n\n # Type: closing-gripper\n if desig.check_constraints([('type', 'closing-gripper'), 'gripper']):\n solutions.append(desig.make_dictionary([('cmd', 'move-gripper'), ('motion', 'close'), 'gripper']))\n\n # Type: detecting\n if desig.check_constraints([('type', 'detecting'), 'object']):\n solutions.append(desig.make_dictionary([('cmd', 'detecting'), ('cam_frame', 'wide_stereo_optical_frame'), ('front_facing_axis', [0, 0, 1]), 'object']))\n\n # Type: move-arm-joints\n if desig.check_constraints([('type', 'move-arm-joints')]):\n if desig.check_constraints(['left-arm', 'right-arm']):\n solutions.append(desig.make_dictionary([('cmd', 'move-joints'), ('left-poses', desig.prop_value('left-arm')), ('right-poses', desig.prop_value('right-arm'))]))\n if desig.check_constraints(['left-arm']):\n solutions.append(desig.make_dictionary([('cmd', 'move-joints'), ('left-poses', desig.prop_value('left-arm')), ('right-poses', None)]))\n if desig.check_constraints(['right-arm']):\n solutions.append(desig.make_dictionary([('cmd', 'move-joints'), ('right-poses', desig.prop_value('right-arm')), ('left-poses', None)]))\n\n # Type: world-state-detecting\n if desig.check_constraints([('type', 'world-state-detecting')]):\n solutions.append(desig.make_dictionary([('cmd', 'world-state-detecting'), 'object']))\n\n return solutions", "def make_dhdu(ham, controls, derivative_fn):\n\n dHdu = []\n for ctrl in controls:\n dHdu.append(derivative_fn(ham, ctrl['symbol']))\n\n return dHdu", "def test_exact_supercontrolled_decompose_random(self, seed):\n state = np.random.default_rng(seed)\n decomposer = self.make_random_supercontrolled_decomposer(state)\n self.check_exact_decomposition(random_unitary(4, seed=state).data, decomposer)", "def get_redo_workflow(self, qchem_input_params, sp_params, max_iterations=3):\n\n if self.db is None:\n raise RuntimeError(\"Cannot access database to determine what\"\n \"molecules need to be re-calculated.\")\n\n fws = []\n\n collection = self.db.db[\"molecules\"]\n\n for mol in collection.find({}):\n frequencies = mol[\"output\"][\"frequencies\"]\n\n if any([True if x < 0 else False for x in frequencies]):\n min_molecule_perturb_scale = 0.1\n max_molecule_perturb_scale = 0.3\n scale_grid = 10\n perturb_scale_grid = (max_molecule_perturb_scale -\n min_molecule_perturb_scale) / scale_grid\n msc = MoleculeStructureComparator()\n\n old_molecule = None\n\n for calc in mol[\"calcs_reversed\"]:\n if calc[\"task\"][\"type\"] in [\"freq\", \"frequency\"] and old_molecule is None:\n negative_freq_vecs = calc.get(\"frequency_mode_vectors\")[0]\n old_coords = calc.get(\"initial_geometry\")\n old_molecule = Molecule.from_dict(calc.get(\"initial_molecule\"))\n\n structure_successfully_perturbed = False\n\n for molecule_perturb_scale in np.arange(\n max_molecule_perturb_scale, min_molecule_perturb_scale,\n -perturb_scale_grid):\n new_coords = perturb_coordinates(\n old_coords=old_coords,\n negative_freq_vecs=negative_freq_vecs,\n 
molecule_perturb_scale=molecule_perturb_scale,\n reversed_direction=False)\n new_molecule = Molecule(\n species=old_molecule.species,\n coords=new_coords,\n charge=old_molecule.charge,\n spin_multiplicity=old_molecule.spin_multiplicity)\n if msc.are_equal(old_molecule, new_molecule):\n structure_successfully_perturbed = True\n break\n if not structure_successfully_perturbed:\n raise Exception(\n \"Unable to perturb coordinates to remove negative frequency without changing the bonding structure\"\n )\n\n mol_id = mol[\"mol_id\"]\n dir_name = mol[\"dir_name\"].split(\"/\")[-1]\n\n if dir_name not in listdir(self.base_dir):\n os.mkdir(join(self.base_dir, dir_name))\n\n fws.append(OptFreqSPFW(molecule=new_molecule,\n name=\"Flattening: {}/{}\".format(mol_id,\n dir_name),\n qchem_cmd=\"qchem -slurm\",\n input_file=join(self.base_dir,\n dir_name,\n mol_id + \".in\"),\n output_file=join(self.base_dir,\n dir_name,\n mol_id + \".out\"),\n qclog_file=join(self.base_dir,\n dir_name,\n mol_id + \".qclog\"),\n max_cores=32,\n max_iterations=max_iterations,\n qchem_input_params=qchem_input_params,\n sp_params=sp_params,\n db_file=self.db_file))\n\n if len(fws) == 0:\n return None\n else:\n return Workflow(fws)", "def DM(self):", "def compare_with_deseq(self):\n project_creator = ProjectCreator()\n project_creator.create_subfolders(\n self._pathcreator.required_deseq_folders()\n )\n arg_libs = [\n self._pathcreator._clean_file_name(lib)\n for lib in self._args.libs.split(\",\")\n ]\n conditions = self._args.conditions.split(\",\")\n replicates = self._args.replicates.split(\",\")\n libs_by_species = self._get_libs_by_species(self._args.libs_by_species)\n size_factor = self._args.size_factor\n self._check_deseq_args(arg_libs, conditions)\n for sp in self._species_folder_prefixes_and_display_names.keys():\n deseq_runner = DESeqRunner(\n sp,\n arg_libs,\n conditions,\n replicates,\n libs_by_species,\n size_factor,\n self._pathcreator.deseq_folders_by_species[sp][\n \"deseq_raw_folder\"\n ],\n self._pathcreator.deseq_folders_by_species[sp][\n \"deseq_extended_folder\"\n ],\n self._pathcreator.deseq_files_by_species[sp][\n \"deseq_script_path\"\n ],\n self._pathcreator.deseq_files_by_species[sp][\n \"deseq_pca_heatmap_path\"\n ],\n self._pathcreator.gene_quanti_files_by_species[sp][\n \"gene_wise_quanti_combined_path\"\n ],\n self._pathcreator.deseq_files_by_species[sp][\n \"deseq_tmp_session_info_script\"\n ],\n self._pathcreator.deseq_files_by_species[sp][\n \"deseq_session_info\"\n ],\n self._args.fc_shrinkage_off,\n self._args.cooks_cutoff_off,\n )\n deseq_runner.create_deseq_script_file()\n deseq_runner.write_session_info_file()\n deseq_runner.run_deseq()\n deseq_runner.merge_counting_files_with_results()", "def test_dg_de(self):\n dfn = lambda x: self.model.g(self.s, x, self.t, self.T)\n nderiv = differentiate(dfn, self.e)\n cderiv = self.model.dg_de(self.s, self.e, self.t, self.T)\n self.assertTrue(np.isclose(nderiv, cderiv, rtol = 1.0e-4))", "def protein_delins(egfr_context):\n params = {\n \"id\": \"normalize.variation:NP_001333827.1%3Ap.Leu747_Thr751delinsPro\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.eDMXxJw9shlSKF3znIg5abniGoyJ3GQ4\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.eDMXxJw9shlSKF3znIg5abniGoyJ3GQ4\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.Mm8duqYDJyel5ZnwScnxLyGH1i9lcl3T\",\n \"interval\": {\n \"end\": {\"value\": 751, \"type\": \"Number\"},\n \"start\": {\"value\": 746, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": 
\"ga4gh:SQ.vyo55F6mA6n2LgN4cagcdRzOuh38V4mE\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"P\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"protein\",\n \"structural_type\": \"SO:1000032\",\n \"vrs_ref_allele_seq\": \"LREAT\",\n \"gene_context\": egfr_context\n }\n return VariationDescriptor(**params)", "def dpf(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['dpf']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for i in xrange(1,4):\n label = \"DPF{0}\".format(str(i))\n lAng_label = 'L{0}ANG'.format(str(i))\n cAng_label = 'C{0}ANG'.format(str(i))\n distillate_label = get_distillate_label([lAng_label, cAng_label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_lAng_label = lAng_label\n dep_lAng_name = fields['deps'][0]\n dep_lAng_uuid = self.uuid_map[lAng_label]\n dep_cAng_label = cAng_label\n dep_cAng_name = fields['deps'][1]\n dep_cAng_uuid = self.uuid_map[cAng_label]\n deps = [[dep_lAng_label, dep_lAng_name, dep_lAng_uuid],\n [dep_cAng_label, dep_cAng_name, dep_cAng_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"DPF\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[label] = emitted[-2][-36:]\n\n filename = \"{0}/DPF_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def test_expand_degeneracies(self):\r\n # No expansion.\r\n self.assertEqual(expand_degeneracies(['ACG']), ['ACG'])\r\n\r\n # Expansion, single sequence.\r\n self.assertEqual(sorted(expand_degeneracies(['RGY'])),\r\n ['AGC', 'AGT', 'GGC', 'GGT'])\r\n\r\n # Multiple sequences.\r\n self.assertEqual(sorted(expand_degeneracies(['ACGW', 'KAT'])),\r\n ['ACGA', 'ACGT', 'GAT', 'TAT'])", "def viz_deseq(self):\n # Create output folders for each species\n project_creator = ProjectCreator()\n project_creator.create_subfolders(\n self._pathcreator.required_viz_deseq_folders()\n )\n\n for sp in self._species_folder_prefixes:\n # Set output folder and files paths for each species\n deseq_path_template = (\n self._pathcreator.deseq_folders_by_species[sp][\n \"deseq_raw_folder\"\n ]\n + \"/deseq_comp_\"\n )\n\n deseq_viz = DESeqViz(\n self._pathcreator.deseq_files_by_species[sp][\n \"deseq_script_path\"\n ],\n deseq_path_template,\n max_pvalue=self._args.max_pvalue,\n )\n deseq_viz.create_scatter_plots(\n self._pathcreator.viz_deseq_files_by_species[sp][\n \"viz_deseq_scatter_plot_path\"\n ]\n )\n deseq_viz.create_volcano_plots(\n self._pathcreator.viz_deseq_files_by_species[sp][\n \"viz_deseq_volcano_plot_path\"\n ],\n self._pathcreator.viz_deseq_files_by_species[sp][\n \"viz_deseq_volcano_plot_adj_path\"\n ],\n )", "def createDelexData():\n # download the data\n loadData()\n\n # create dictionary of delexicalied values that then we will search against, order matters here!\n dic = delexicalize.prepareSlotValuesIndependent()\n\n\n fin1 = file('data/multi-woz/data.json')\n data = json.load(fin1)\n\n fin2 = file('data/multi-woz/dialogue_acts.json')\n data2 = json.load(fin2)\n\n for dialogue_name in tqdm(data):\n dialogue 
= data[dialogue_name]\n # print dialogue_name\n\n idx_acts = 1\n\n for idx, turn in enumerate(dialogue['log']):\n # normalization, split and delexicalization of the sentence\n sent = normalize(turn['text'])\n\n words = sent.split()\n sent = delexicalize.delexicalise(' '.join(words), dic)\n\n # parsing reference number GIVEN belief state\n sent = delexicaliseReferenceNumber(sent, turn)\n\n # changes to numbers only here\n digitpat = re.compile('\\d+')\n sent = re.sub(digitpat, '[value_count]', sent)\n\n # delexicalized sentence added to the dialogue\n dialogue['log'][idx]['text'] = sent\n\n if idx % 2 == 1: # if it's a system turn\n # add database pointer\n pointer_vector = addDBPointer(turn)\n # add booking pointer\n pointer_vector = addBookingPointer(dialogue, turn, pointer_vector)\n\n # print pointer_vector\n dialogue['log'][idx - 1]['db_pointer'] = pointer_vector.tolist()\n\n # FIXING delexicalization:\n dialogue = fixDelex(dialogue_name, dialogue, data2, idx, idx_acts)\n idx_acts += 1\n\n delex_data[dialogue_name] = dialogue\n\n with open('data/multi-woz/delex.json', 'w') as outfile:\n json.dump(delex_data, outfile)\n\n return delex_data", "def getDels(my_cigar, my_md):\n # we only need the first position of the getDiffLocs range\n # x[1][1:] cleans '^' from dels\n del_loc = (x[0] for x in getDiffLocs(my_cigar, 'D'))\n del_type = (x[1][1:] for x in splitTag(my_md) if x[1][0] == '^')\n return ((x, 'D', y) if len(y) == 1 else (x, 'P', y)\n for x,y in zip(del_loc, del_type))", "def makeDstar2D0Pi( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n daugCuts = \"(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)\" % locals()['config']\n combCuts = \"((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)\" % locals()['config']\n dstarCuts = \"(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)\" \\\n \"& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)\" % locals()['config']\n\n _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"pi+\" : daugCuts }\n , CombinationCut = combCuts\n , MotherCut = dstarCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dstar,\n RequiredSelections = inputSel\n )", "def prepare_data_for_d(data, id2motifs, generator):\n motifs = []\n labels = []\n g_s_args = []\n poss = []\n negs = []\n for i in range(data.x.size(0)):\n if np.random.rand() < 1:\n pos = random.sample(id2motifs[i], min(len(id2motifs[i]), n_sample))\n poss.append(pos)\n g_s_args.append((i, len(pos), True))\n\n\n z = generator(data.x, data.total_edge_index)\n # row, col = data.total_edge_index\n\n\n # x_j = torch.index_select(z, 0, row)\n # x_i = torch.index_select(z, 0, col)\n # one_hop = torch.einsum(\"ef,ef->ef\", x_i, x_j)\n\n negs, _ = sampling(g_s_args, z, data)\n\n # negs =[]\n # for i in range(data.x.size(0)):\n # neg=[]\n # if(len(poss[i])>0):\n # ps= torch.tensor(poss[i][0]).to(device)\n # # pdb.set_trace()\n # x_j = torch.index_select(one_hop, 0, ps)\n # x_i = torch.index_select(one_hop, 0, ps)\n # two_hop = torch.einsum(\"ef,ef->e\", x_j, x_i)\n # __, target = torch.topk(two_hop, len(poss[i]))\n # for k in range(len(poss[i])):\n # neg.append((i, row[target[k]].item(), col[target[k]].item()))\n # negs.append(neg)\n\n \n for pos, neg in zip(poss, negs):\n if len(pos) != 0 and neg is not None:\n motifs.extend(pos)\n labels.extend([1] * len(pos))\n motifs+=neg\n labels.extend([0] * len(neg))\n motifs, labels = shuffle(motifs, labels)\n pdb.set_trace()\n return motifs, labels", "def decompose_molecule(molecule, n=1):\n if isinstance(n, str):\n n = int(n)\n \n # define regexs\n parens = 
re.compile('\\(([A-z0-9()]+)\\)([0-9]+)?')\n stoich = re.compile('([A-Z][a-z]?)([0-9]+)?')\n\n ps = parens.findall(molecule) # find subgroups in parentheses\n rem = parens.sub('', molecule) # get remainder\n \n if len(ps) > 0:\n for s, ns in ps:\n comp = decompose_molecule(s, ns)\n for k, v in comp.items():\n comp[k] = v * n\n else:\n comp = {}\n \n for e, ns in stoich.findall(rem):\n if e not in comp:\n comp[e] = 0\n if ns == '':\n ns = 1 * n\n else:\n ns = int(ns) * n\n comp[e] += ns\n\n return comp", "def zzX_diophantine(F, c, A, d, p):\n if not A:\n S = [ [] for _ in F ]\n n = zzx_degree(c)\n\n for i, coeff in enumerate(c):\n if not coeff:\n continue\n\n T = zzx_diophantine(F, n-i, p)\n\n for j, (s, t) in enumerate(zip(S, T)):\n t = zzx_mul_const(t, coeff)\n S[j] = zzx_trunc(zzx_add(s, t), p)\n else:\n n = len(A) + 1\n e = zzX_expand(*F)\n\n a, A = A[-1], A[:-1]\n B, G = [], []\n\n for f in F:\n B.append(zzX_quo(e, f))\n G.append(zzX_eval_for(f, n, a))\n\n C = zzX_eval_for(c, n, a)\n\n S = zzX_diophantine(G, C, A, d, p)\n S = [ zzX_lift(1, s) for s in S ]\n\n for s, b in zip(S, B):\n c = zzX_sub_mul(c, s, b)\n\n c = zzX_zz_trunc(c, p)\n\n m = zzX_value(n-1, [1, -a])\n M = zzX_const(n, 1)\n\n for k in xrange(0, d):\n if zzX_zero_p(c):\n break\n\n M = zzX_mul(M, m)\n C = zzX_diff_eval(c, n, k+1, a)\n\n if not zzX_zero_p(C):\n C = zzX_quo_const(C, factorial(k+1))\n T = zzX_diophantine(G, C, A, d, p)\n\n for i, t in enumerate(T):\n T[i] = zzX_mul(zzX_lift(1, t), M)\n\n for i, (s, t) in enumerate(zip(S, T)):\n S[i] = zzX_add(s, t)\n\n for t, b in zip(T, B):\n c = zzX_sub_mul(c, t, b)\n\n c = zzX_zz_trunc(c, p)\n\n S = [ zzX_zz_trunc(s, p) for s in S ]\n\n return S", "def testDipoleTask(self):\n sources = self.runDetection()\n\n offsets = self.params.offsets\n for i, r1 in enumerate(sources):\n result = r1.extract(\"ip_diffim_DipoleFit*\")\n self.assertClose((result['ip_diffim_DipoleFit_pos_flux'] +\n abs(result['ip_diffim_DipoleFit_neg_flux']))/2.,\n self.params.flux[i], rtol=0.02)\n self.assertClose(result['ip_diffim_DipoleFit_pos_centroid_x'],\n self.params.xc[i] + offsets[i], rtol=0.01)\n self.assertClose(result['ip_diffim_DipoleFit_pos_centroid_y'],\n self.params.yc[i] + offsets[i], rtol=0.01)\n self.assertClose(result['ip_diffim_DipoleFit_neg_centroid_x'],\n self.params.xc[i] - offsets[i], rtol=0.01)\n self.assertClose(result['ip_diffim_DipoleFit_neg_centroid_y'],\n self.params.yc[i] - offsets[i], rtol=0.01)\n # Note this is dependent on the noise (variance) being realistic in the image.\n # otherwise it throws off the chi2 estimate, which is used for classification:\n self.assertTrue(result['ip_diffim_DipoleFit_flag_classification'])\n\n # compare to the original ip_diffim_PsfDipoleFlux measurements\n result2 = r1.extract(\"ip_diffim_PsfDipoleFlux*\")\n self.assertClose((result['ip_diffim_DipoleFit_pos_flux'] +\n abs(result['ip_diffim_DipoleFit_neg_flux']))/2.,\n (result2['ip_diffim_PsfDipoleFlux_pos_flux'] +\n abs(result2['ip_diffim_PsfDipoleFlux_neg_flux']))/2.,\n rtol=0.02)\n self.assertClose(result['ip_diffim_DipoleFit_pos_centroid_x'],\n result2['ip_diffim_PsfDipoleFlux_pos_centroid_x'],\n rtol=0.01)\n self.assertClose(result['ip_diffim_DipoleFit_pos_centroid_y'],\n result2['ip_diffim_PsfDipoleFlux_pos_centroid_y'],\n rtol=0.01)\n self.assertClose(result['ip_diffim_DipoleFit_neg_centroid_x'],\n result2['ip_diffim_PsfDipoleFlux_neg_centroid_x'],\n rtol=0.01)\n self.assertClose(result['ip_diffim_DipoleFit_neg_centroid_y'],\n result2['ip_diffim_PsfDipoleFlux_neg_centroid_y'],\n 
rtol=0.01)\n\n if self.params.display:\n DipolePlotUtils.displayCutouts(r1, self.testImage.diffim,\n self.testImage.posImage, self.testImage.negImage)\n if self.params.display:\n DipolePlotUtils.plt.show()\n\n return result", "def _get_dets_to_use(self, item):\n # Load questions and answers\n question = item['question']\n answer_choices = item['{}_choices'.format(self.mode)]\n\n if self.only_use_relevant_dets:\n dets2use = np.zeros(len(item['objects']), dtype=bool)\n people = np.array([x == 'person' for x in item['objects']], dtype=bool)\n for sent in answer_choices + [question]:\n for possibly_det_list in sent:\n if isinstance(possibly_det_list, list):\n for tag in possibly_det_list:\n if tag >= 0 and tag < len(item['objects']): # sanity check\n dets2use[tag] = True\n elif possibly_det_list.lower() in ('everyone', 'everyones'):\n dets2use |= people\n if not dets2use.any():\n dets2use |= people\n else:\n dets2use = np.ones(len(item['objects']), dtype=bool)\n\n # we will use these detections\n dets2use = np.where(dets2use)[0]\n\n old_det_to_new_ind = np.zeros(len(item['objects']), dtype=np.int32) - 1\n old_det_to_new_ind[dets2use] = np.arange(dets2use.shape[0], dtype=np.int32)\n\n # If we add the image as an extra box then the 0th will be the image.\n if self.add_image_as_a_box:\n old_det_to_new_ind[dets2use] += 1\n # old_det_to_new_ind = old_det_to_new_ind.tolist()\n return dets2use, old_det_to_new_ind", "def check_disulphide_in_pdb(pdb):\n\tstructure_file = \"/Users/ajvenkatakrishnan/Downloads/\" + pdb + \".pdb\"\n\tcmd.load(structure_file, pdb)\n\n\t\"\"\"output file for the list of disulphide bridge candidates\"\"\"\n\tdisulphides_outfile = \"/Users/ajvenkatakrishnan/Downloads/\" + pdb + \"_disulphide.txt\"\n\tf = open(disulphides_outfile, 'w')\n\n\t\"\"\" Create a selection of residues for disulphide scanning \"\"\"\n\tstored.list = []\n\tpymol_selection = \"name CA and (resi 127-167 or resi 198-245 or resi 280-360 or resi 382-412) and \" + pdb\n\t# pymol_selection = \"name CA and (resi 130-148 or resi 406-408)\" + pdb\n\t#pymol_selection = \"name CA and (resi 42-88 or resi 111-166 or resi 209-288 or resi 318-342) and \" + pdb\n\tcmd.iterate((pymol_selection), \"stored.list.append(resi)\")\n\tselection_list = stored.list\n\n\t\"\"\" Generate Cys rotamers at all the positions in the selection \"\"\"\n\tfor resnum_i in stored.list:\n\t\tmutate_to_cys(pdb, resnum_i)\n\n\tcmd.disable(\"rotamer*\")\n\n\t\"\"\" Shortlist Cys-pairs that satisfy the criteria for disulphide bond formation \"\"\"\n\tdisulphide_graph=nx.empty_graph()\n\tcutoff_dist = 4.5\n\n\tfor resnum_i in selection_list:\n\t\tneighbours_list = get_neighbours(resnum_i, cutoff_dist, pdb)\n\t\tfor resnum_j in neighbours_list:\t\n\t\t\tif (int (resnum_i) - int (resnum_j) > 4 or int (resnum_i) - int (resnum_j) < -4):\n\t\t\t\tif str(resnum_j) in selection_list:\n\t\t\t\t\tif not disulphide_graph.has_edge(str(resnum_i), str(resnum_j)):\n\t\t\t\t\t\tcheck_disulphide_criteria(pdb, str(resnum_i), str(resnum_j), f)\n\t\t\t\t\t\tdisulphide_graph.add_edge(str(resnum_i), str(resnum_j))", "def generate_input(self, input_type='input', optimise=False, hessian=False, density=False, energy=False,\n fchk=False, run=True):\n\n molecule = self.molecule.molecule[input_type]\n\n setters = ''\n tasks = ''\n\n # input.dat is the PSI4 input file.\n with open('input.dat', 'w+') as input_file:\n # opening tag is always writen\n input_file.write(f\"memory {self.qm['threads']} GB\\n\\nmolecule {self.molecule.name} {{\\n{self.charge} {self.multiplicity} 
\\n\")\n # molecule is always printed\n for atom in molecule:\n input_file.write(f' {atom[0]} {float(atom[1]): .10f} {float(atom[2]): .10f} {float(atom[3]): .10f} \\n')\n input_file.write(f\" units angstrom\\n no_reorient\\n}}\\n\\nset {{\\n basis {self.qm['basis']}\\n\")\n\n if energy:\n append_to_log('Writing psi4 energy calculation input')\n tasks += f\"\\nenergy = energy('{self.qm['theory']}')\"\n\n if optimise:\n append_to_log('Writing PSI4 optimisation input', 'minor')\n setters += f\" g_convergence {self.qm['convergence']}\\n GEOM_MAXITER {self.qm['iterations']}\\n\"\n tasks += f\"\\noptimize('{self.qm['theory'].lower()}')\"\n\n if hessian:\n append_to_log('Writing PSI4 Hessian matrix calculation input', 'minor')\n setters += ' hessian_write on\\n'\n\n tasks += f\"\\nenergy, wfn = frequency('{self.qm['theory'].lower()}', return_wfn=True)\"\n\n tasks += '\\nwfn.hessian().print_out()\\n\\n'\n\n if density:\n append_to_log('Writing PSI4 density calculation input', 'minor')\n setters += \" cubeprop_tasks ['density']\\n\"\n\n overage = get_overage(self.molecule.name)\n setters += \" CUBIC_GRID_OVERAGE [{0}, {0}, {0}]\\n\".format(overage)\n setters += \" CUBIC_GRID_SPACING [0.13, 0.13, 0.13]\\n\"\n tasks += f\"grad, wfn = gradient('{self.qm['theory'].lower()}', return_wfn=True)\\ncubeprop(wfn)\"\n\n if fchk:\n append_to_log('Writing PSI4 input file to generate fchk file')\n tasks += f\"\\ngrad, wfn = gradient('{self.qm['theory'].lower()}', return_wfn=True)\"\n tasks += '\\nfchk_writer = psi4.core.FCHKWriter(wfn)'\n tasks += f'\\nfchk_writer.write(\"{self.molecule.name}_psi4.fchk\")\\n'\n\n # TODO If overage cannot be made to work, delete and just use Gaussian.\n # if self.qm['solvent']:\n # setters += ' pcm true\\n pcm_scf_type total\\n'\n # tasks += '\\n\\npcm = {'\n # tasks += '\\n units = Angstrom\\n Medium {\\n SolverType = IEFPCM\\n Solvent = Chloroform\\n }'\n # tasks += '\\n Cavity {\\n RadiiSet = UFF\\n Type = GePol\\n Scaling = False\\n Area = 0.3\\n Mode = Implicit'\n # tasks += '\\n }\\n}'\n\n setters += '}\\n'\n\n if not run:\n setters += f'set_num_threads({self.qm[\"threads\"]})\\n'\n\n input_file.write(setters)\n input_file.write(tasks)\n\n if run:\n sub_run(f'psi4 input.dat -n {self.qm[\"threads\"]}', shell=True)", "def casdetude_genetics():\n file_path = PROJECT_PATH + \"/geographycal_data/Monterusciello/MontEdo_buildings\"\n router = Router(building_file=file_path)\n\n router.design_aqueduct(0)\n\n router.write2epanet(router.acqueduct, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\",\n diam=False)\n\n read_epanet = graphIO.graph_reader(router.acqueduct)\n read_epanet.read_epanet(PROJECT_PATH + \"/geographycal_data/SolvedNet/MonteSolution\")\n kpi_calculator(router.acqueduct)\n\n minimal = router.design_minimal_aqueduct(router.acqueduct, \"Q*H\")\n router.write2epanet(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\", diam=False)", "def vacuum_cgmd(self):\n\n\t\texstring_dssp = 'except: cannot find dssp at '+gmxpaths['dssp']+\\\n\t\t\t'\\nconsider using the following syntax to download for 64-bit linux:'+\\\n\t\t\t'\\n\\twget ftp://ftp.cmbi.ru.nl/pub/software/dssp/dssp-2.0.4-linux-amd64'+\\\n\t\t\t'\\n\\tor navigate to ftp://ftp.cmbi.ru.nl/pub/software/dssp/'+\\\n\t\t\t'\\n\\tand make sure you add execute permissions'\n\t\t\t\n\t\texstring_martinize = 'except: cannot find martinize at '+gmxpaths['martinize']+\\\n\t\t\t'\\nconsider using the following syntax to download:'+\\\n\t\t\t'\\n\\twget 
http://md.chem.rug.nl/cgmartini/images/tools/martinize/martinize-2.4/martinize.py'+\\\n\t\t\t'\\n\\tor navigate to http://md.chem.rug.nl/cgmartini/index.php/tools2/proteins-and-bilayers'+\\\n\t\t\t'\\n\\tand make sure you add execute permissions'\n\t\n\t\t#---first test to see if executables are available\n\t\tif not os.path.isfile(os.path.expanduser(gmxpaths['dssp'])): raise Exception(exstring_dssp)\n\t\tif not os.path.isfile(os.path.expanduser(gmxpaths['martinize'])): raise Exception(exstring_martinize)\t\n\t\n\t\tcmd = [gmxpaths['martinize'],\n\t\t\t'-f system-input.pdb',\n\t\t\t'-o system-original.top',\n\t\t\t'-x protein-cg.pdb',\n\t\t\t'-ff martini22','-ed',\n\t\t\t'-dssp '+gmxpaths['dssp']]\n\t\tcall(cmd,logfile='log-martinize',cwd=self.rootdir)\n\t\t\n\t\twith open(self.rootdir+'system-original.top') as fp: lines = fp.readlines()\n\t\tself.itp_protein = [l.split()[0] for l in lines if l[:7] == 'Protein']\n\n\t\t#---note that this section leaves out lipids\n\t\tself.itp_lipid = []\n\t\t\n\t\t#---note that this method is currently set to only simulate one protein\n\t\tself.nprots = [1]\n\t\tself.write_topology_protein('vacuum.top')\n\t\t\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f protein-cg.pdb',\n\t\t\t'-o vacuum-alone.gro']\n\t\tcall(cmd,logfile='log-editconf-convert',cwd=self.rootdir)\n\t\n\t\tprint \"building box with \"+str(self.settings['wbuffer'])+'nm of water'\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f vacuum-alone.gro',\n\t\t\t'-d '+str(self.settings['wbuffer']),\n\t\t\t'-o vacuum.gro','-c']\n\t\tcall(cmd,logfile='log-editconf-vacuum',cwd=self.rootdir)\n\t\t\n\t\tself.minimization_method('vacuum')", "def RemoveDuplicateNodes(self, deci=8, tol=1e-08):\n\n self.__do_essential_memebers_exist__()\n\n from Florence.Tensor import remove_duplicates_2D, makezero\n\n makezero(self.points,tol=1e-10)\n\n points, idx_points, inv_points = remove_duplicates_2D(self.points, decimals=8)\n if points.shape[0] == self.points.shape[0]:\n return\n\n unique_elements, inv_elements = np.unique(self.elements,return_inverse=True)\n unique_elements = unique_elements[inv_points]\n elements = unique_elements[inv_elements]\n elements = elements.reshape(self.elements.shape)\n\n # RECOMPUTE EVERYTHING\n self.elements = np.ascontiguousarray(elements, dtype=np.int64)\n self.points = np.ascontiguousarray(points, dtype=np.float64)\n self.nnode = self.points.shape[0]\n\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetEdges()\n self.GetBoundaryEdges()\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetFaces()\n self.GetBoundaryFaces()\n self.GetBoundaryEdges()", "def makeDstarPartial( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n daugCuts = \"(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)\" % locals()['config']\n combCuts = \"((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)\" % locals()['config']\n dstarCuts = \"(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)\" \\\n \"& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)\" % locals()['config']\n\n _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"pi+\" : daugCuts }\n , CombinationCut = combCuts\n , MotherCut = dstarCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dstar,\n RequiredSelections = inputSel\n )", "def _list_coefficients_by_discriminant(self, fd=True, pos=True, neg=True, printimag=False, norm_neg=True, dmin=0, dmax=0, latex=False, nd=0, Lvals=False, prime=False):\n sig = 1\n S = \"$\"\n if(self._space.WR.is_dual()):\n sig = -1\n maxn = 
max(self._coeffs[list(self._coeffs.keys())[0]].keys())\n maxD = self._space.WR.level() * (maxn + 1)\n N = self._space.WR.N\n if(dmax > 0):\n w1 = len(str(dmax)) + 1\n else:\n w1 = len(str(maxD)) + 1\n w2 = max(list(map(len, str(self._space.WR.D()).split())))\n w3 = len(str(maxn)) + 1\n mp0 = mpmath.mpf(0)\n mpold = mpmath.mp.dps\n if(mpmath.mp.dps < self.maxdigs):\n mpmath.mp.dps = self.maxdigs\n if(norm_neg and neg):\n cnorm = 0\n tnorm = (0, 0)\n for j in range(1, 100):\n t = rn_from_D(self.space.WR, -j * sig)\n if(t is None):\n continue\n if(t[1] + self._space.WR.Qv[t[0]] >= 0):\n continue\n c1 = self.get_coefficient(t[0], t[1])\n if(c1 is None):\n continue\n # print \"c1 =\",c1\n # If the first coefficient is zero to the precision we assume we shouldn't divide by it\n if(abs(c1) > mpmath.power(10, -self.prec)):\n cnorm = c1 * mpmath.sqrt(j)\n tnorm = t\n print(\"c1=c({0})=c({1})={2}\".format(tnorm, -j * sig, cnorm))\n break\n\n for sn in [1, -1]:\n for D in range(1, maxD):\n # print \"D=\",D\n if(dmin > 0 and abs(D) < dmin):\n continue\n if dmax > 0 and abs(D) > dmax:\n continue\n DD = sig * D * sn\n # print \"D=\",D,is_fundamental_discriminant(D)\n if fd and not is_fundamental_discriminant(DD) and DD != 1:\n # print \"D=\",D,is_fundamental_discriminant(D)\n continue\n if prime and gcd(D, N) > 1:\n continue\n t = rn_from_D(self._space.WR, DD)\n if t is None:\n continue\n else:\n (r, n) = t\n # print \" DD=\",DD,t\n nn = n + self._space.WR.Qv[r]\n if(not pos and nn >= 0):\n continue\n if(not neg and nn < 0):\n continue\n\n c = self.get_coefficient(r, n)\n cs = \"\"\n erms = \"\"\n erm = 10\n if c != 0 and c is not None:\n if nn >= 0:\n ss = \"+\"\n if nn < 0:\n ss = \"-\"\n if(norm_neg):\n if ((r, n) != tnorm) and cnorm != 0:\n c = c / cnorm * mpmath.sqrt(mpmath.mpf(abs(D)))\n x = c.real()\n x1 = floor(x)\n x2 = ceil(x)\n er1 = abs(x1 - x)\n er2 = abs(x2 - x)\n erm = min(er1, er2)\n erms = sci_pretty_print(erm, 2, latex_pow=latex)\n if(erm < 0.001):\n if(er1 < er2):\n cs = str(x1)\n else:\n cs = str(x2)\n elif not printimag:\n if(nd > 0):\n cs = str(c.real()).strip()\n cs = sci_pretty_print(cs, nd, latex_pow=latex)\n else:\n cs = str(c.real())\n else:\n if(nd > 0):\n cs = str(c).strip()\n cs = sci_pretty_print(cs, nd, latex_pow=latex)\n else:\n cs = str(c)\n if(c.real() >= 0 and latex):\n cs = r\"\\hphantom{-}\" + cs\n elif(c.real() >= 0):\n cs = \" \" + cs\n if(latex):\n O = \" & \"\n if(Lvals and list(self._Lv.keys()).count(DD) == 1):\n ls = \"&\" + S + sci_pretty_print(self._Lv[DD], nd, latex_pow=latex) + S\n else:\n ls = \"\"\n if(len(erms) == 0):\n s = S + str(DD).center(w1) + S + \"&\" + S + cs + S + ls + \"\\\\\\\\\"\n else:\n s = S + str(DD).center(w1) + S + \"&\" + S + cs + S + ls + O + S + erms + S + \"\\\\\\\\\"\n else:\n if(Lvals and list(self._Lv.keys()).count(DD) == 1):\n ls = \"\\t\" + sci_pretty_print(self._Lv[DD], nd)\n else:\n ls = \"\"\n if(len(erms) == 0):\n s = \"C^\" + ss + \"[\" + str(DD).center(w1) + \"] = \" + cs + ls\n else:\n s = \"C^\" + ss + \"[\" + str(DD).center(w1) + \"] = \" + cs + ls + \" \" + erms + \"\\n\"\n # s=s+str(self._space.WR.D[r]).ljust(w2)+\",\"+str(n).ljust(w3)+\"] = \"+cs\n print(s)\n mpmath.mp.dps = mpold", "def ddgen(self, c, minimize, maximize):\n\n self.minimize = minimize\n self.maximize = maximize\n\n n = 2\n self.CC = c\n\n\tif self.debug_dd:\n\t print (\"dd(\" + self.pretty(c) + \", \" + `n` + \")...\")\n\n\toutcome = self._dd(c, n)\n\n\tif self.debug_dd:\n\t print (\"dd(\" + self.pretty(c) + \", \" + `n` + \") = \" + 
`outcome`)\n\n\treturn outcome", "def _get_design_unit(\n self,\n protein_structure: oechem.OEMolBase,\n structure_identifier: str,\n electron_density: Union[oegrid.OESkewGrid, None] = None,\n ligand_name: Union[str, None] = None,\n chain_id: Union[str, None] = None,\n alternate_location: Union[str, None] = None\n ) -> oechem.OEDesignUnit:\n from openeye import oechem\n\n from ..modeling.OEModeling import prepare_complex, prepare_protein\n from ..utils import LocalFileStorage\n\n if ligand_name == \"-\":\n ligand_name = None\n\n if alternate_location == \"-\":\n alternate_location = None\n\n # generate unique design unit name\n design_unit_path = LocalFileStorage.featurizer_result(\n self.__class__.__name__,\n \"_\".join([\n structure_identifier,\n f\"ligand{ligand_name}\",\n f\"chain{chain_id}\",\n f\"altloc{alternate_location}\"\n ]),\n \"oedu\",\n self.cache_dir\n )\n if not design_unit_path.is_file():\n logging.debug(\"Generating design unit ...\")\n if ligand_name is None:\n design_unit = prepare_protein(\n protein_structure,\n chain_id=chain_id,\n alternate_location=alternate_location,\n cap_termini=False\n )\n else:\n design_unit = prepare_complex(\n protein_structure,\n electron_density=electron_density,\n chain_id=chain_id,\n alternate_location=alternate_location,\n ligand_name=ligand_name,\n cap_termini=False\n )\n logging.debug(\"Writing design unit ...\")\n oechem.OEWriteDesignUnit(str(design_unit_path), design_unit)\n # re-reading design unit helps proper capping of e.g. 2itz\n # TODO: revisit, report bug\n logging.debug(\"Reading design unit from file ...\")\n design_unit = oechem.OEDesignUnit()\n oechem.OEReadDesignUnit(str(design_unit_path), design_unit)\n\n return design_unit", "def _get_2D_by_template(self, mol):\n results = list()\n try:\n for key, template in self.templates.items():\n temp_mol = Chem.RWMol(mol)\n if temp_mol.HasSubstructMatch(template):\n AllChem.GenerateDepictionMatching2DStructure(temp_mol, template)\n flaws = DepictionValidator(temp_mol).depiction_score()\n results.append(\n DepictionResult(\n source=DepictionSource.Template,\n template_name=key,\n mol=temp_mol,\n score=flaws,\n )\n )\n except Exception:\n # if it fails it fails, but generally it wont\n logging.warning(\"Depiction generation by template failed\")\n\n return results", "def convertDIMACS(formula : Formula):\n\n # Check the given Formula and based on what it is, print an Error or the DIMACS-Format\n # This is to prevent Errors which can occur because the parsing of the atom can\n # be out of Index and a Exception can get triggered.\n\n if formula.top:\n print(\"This is a tautology! No need to print the DIMACS-Format!\")\n return\n\n elif formula.bot:\n print(\"This is a unsatisfiable Formula! No need to print the DIMACS-Format!\")\n return\n\n elif formula.atom: # If the Formula is only one atom, then print the following\n print(\"p cnf 1 1\\n1 0\")\n return\n\n try:\n if formula.neg.atom: # If the Formula is only one negated atom\n print(\"p cnf 1 1\\n-1 0\")\n return\n except:\n pass\n\n # Checking of the Formula end. 
Now move on to parse it and get the DIMACS-Format\n # ===============================================================================\n\n res = []\n acc = []\n variables = []\n str_form = str(formula)\n i = 0\n\n while i < len(str(formula)):\n\n c = str_form[i]\n\n if c == \"\\u00AC\": # If there is a neg char\n # Format: i = neg, i+1 = BLANK, i+2 = Atom beginning\n\n atom, i = getAtom(str_form, i+2) # Because of the format an index shift\n\n c += atom\n\n if c not in acc: acc.append(c) # Make sure by checking, that you don't put one Variable in which is already in \n \n if atom not in variables: variables.append(atom) # Check if the atom is already in the List, otherwise append it\n\n\n elif c in string.ascii_lowercase: # If you read a lowercase char, then it is a Atom and check how long it continues\n \n atom, i = getAtom(str_form, i)\n\n if atom not in acc: acc.append(atom)\n\n if atom not in variables: variables.append(atom)\n\n elif c == \"\\u2227\": # If there is a and Symbol, append the current set\n res.append(acc)\n acc = []\n\n i += 1\n\n res.append(acc) # Make sure the last set is also appended because there is no and symbol\n\n variables.sort() # Make sure the char set is sorted so it is easier to comprehend the Result\n print(variables)\n\n print(\"p cnf %d %d\" % (len(variables), len(res))) # Calculate the used variables and the count of the formulas\n\n # Loop trogh everey set of sets and check which char is shown at the moment\n for l in res:\n\n for i in l:\n\n if i[0] == \"\\u00AC\": # When there is a neg then get the index of i+1 and set a - in front\n print(\"-\" + str(variables.index(i[1:]) + 1), end=\" \")\n else:\n print(variables.index(i) + 1, end=\" \") # Normal char which can be printed normally\n\n print(\"0\")", "def pseudopotentialise_molecule(self, sysargs=None, execute_deletion=True):\n\n # Find atoms to replace\n deletion_list = []\n if len(sysargs) > 2:\n if 'del' in sysargs:\n deletion_list = self.parse_coord_list(sysargs[4])\n replacement_list = self.parse_coord_list(sysargs[2])\n atoms_to_potentialise = list(item for item in self.coord_list if item[\"#\"] in replacement_list)\n else:\n atoms_to_potentialise = (item for item in self.coord_list if item[\"el\"] == 'c')\n deletion_list = (item for item in self.coord_list if item[\"el\"] == 'h')\n print('Pseudo-potentialising carbon atoms %s ...' 
% [atom['#'] for atom in atoms_to_potentialise])\n\n potential_coords_list = []\n\n for atom in atoms_to_potentialise:\n distanced_atom_list = self.order_atoms_by_distance_from(atom['#'])\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n\n if len(distanced_carbon_list) == 1:\n primary_vector = None\n for non_c_atom in distanced_atom_list[1:4]:\n if non_c_atom['el'] != 'h':\n primary_vector = self.vectorise_atom(non_c_atom['#']) - self.vectorise_atom(atom['#'])\n if primary_vector is None:\n primary_vector = self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#'])\n else:\n primary_vector = self.vectorise_atom(distanced_carbon_list[1]['#']) - self.vectorise_atom(atom['#'])\n\n normal_vector = numpy.cross(\n self.vectorise_atom(distanced_atom_list[1]['#']) - self.vectorise_atom(atom['#']),\n self.vectorise_atom(distanced_atom_list[2]['#']) - self.vectorise_atom(atom['#'])\n )\n\n primary_potential_vector = self.lengtherise_vector(primary_vector, self.atom_potential_set_distance)\n potential_set_split_vector = self.lengtherise_vector(normal_vector, self.potential_set_split_distance)\n\n relative_potential_vectors = [\n primary_potential_vector + potential_set_split_vector,\n primary_potential_vector - potential_set_split_vector\n ]\n\n for potential_set in range(self.no_potential_sets_per_atom-1):\n\n pps_positive = numpy.dot(self.construct_euler_rodriguez_matrix(\n normal_vector,\n 2*numpy.pi/self.no_potential_sets_per_atom),\n relative_potential_vectors[-2],\n )\n pps_negative = numpy.dot(self.construct_euler_rodriguez_matrix(\n normal_vector,\n 2*numpy.pi/self.no_potential_sets_per_atom),\n relative_potential_vectors[-1]\n )\n\n relative_potential_vectors.append(pps_positive)\n relative_potential_vectors.append(pps_negative)\n\n if self.add_primary_vector_potentials_as_coords is False:\n del relative_potential_vectors[0]\n del relative_potential_vectors[0]\n\n # potential coords are still relative to their atom, now make them real.\n for vector in relative_potential_vectors:\n potential_coords_list.append(\n {'#': 0, 'el': self.sp2_pseudo_element, 'x': vector[0]+atom['x'], 'y': vector[1]+atom['y'], 'z': vector[2]+atom['z']},\n )\n\n # Now add potentials to coord list, after removing the 'real' hydrogen atoms.\n if execute_deletion is True:\n self.delete_specified_atoms(deletion_list)\n for potential_coord in potential_coords_list:\n self.write_coord(potential_coord, overwrite=False)", "def tester_dad2():\n dad2 = DigilentAnalogDiscovery2()\n yield dad2\n dad2.driver.pins_reset()", "def global_decomposition(iterable = None):\n\tfor graph in iterable:\n\t\tyield graph_decomposition(graph = graph)", "def call_dssp(self):\n import dssp_features\n output_name_A = \"output/\" + self.complex_name + \"_\" + self.chains[0] + \"_dssp.txt\"\n dssp_command_A = \"dssp -i \" + self.complex_name_A + \" > \" + output_name_A\n os.system(dssp_command_A)\n dssp_chain_A = dssp_features.DSSP_features(self.complex_name_A, 6, pdb_chain = self.chains[0])\n\n output_name_B = \"output/\" + self.complex_name + \"_\" + self.chains[1] + \"_dssp.txt\"\n dssp_command_B = \"dssp -i \" + self.complex_name_B + \" > \" + output_name_B\n os.system(dssp_command_B)\n dssp_chain_B = dssp_features.DSSP_features(self.complex_name_B, 6, pdb_chain = self.chains[1])\n\n complex_pdb = self.complex_name + \".pdb\"\n output_name = \"output/\" + self.complex_name + \"_dssp.txt\"\n dssp_command = \"dssp -i \" + complex_pdb + \" > \" + output_name\n os.system(dssp_command)\n 
dssp_chain_comp_A = dssp_features.DSSP_features(complex_pdb, 6, pdb_chain = self.chains[0])\n dssp_chain_comp_B = dssp_features.DSSP_features(complex_pdb, 6, pdb_chain = self.chains[1])\n return dssp_chain_A, dssp_chain_B, dssp_chain_comp_A, dssp_chain_comp_B", "def discrim(inputs, **kwargs):\n return Discrim(**kwargs)(inputs)", "def checkDegenerated(self):\n\n degenerated = False\n\n if np.min(isfinite(self.C)) == 0:\n degenerated = True\n\n elif not ((10**(-16)) < self.sigma_mean < (10**16)):\n degenerated = True\n\n else:\n self.D, self.B = eig(self.C)\n self.D = sqrt(self.D)\n self.D.shape = (self.n,1) # Force D to be a column vector\n if not isreal(self.D).all():\n degenerated = True\n\n if degenerated:\n self.restart()", "def parse_D(self, line):\n # \"line\" can be:\n # D name node1 node2 i0=val m=val v0=val\n # D node1 node2 i0=val m=val v0=val\n tokens = line.split()\n\n if len(tokens) < 6 or len(tokens) > 7:\n raise Exception(\"Incorrect number of arguments.\")\n\n # at this point, the number of tokens is either 6 or 7\n if len(tokens) == 6:\n name = None\n base = 1\n else:\n name = tokens[1]\n base = 2\n\n node1 = tokens[base]\n node2 = tokens[base + 1]\n\n # the parameters are at [base+2] to [base+4]\n parameters = self.extract_parameters(tokens[base+2 : base+5])\n self.check_parameter_existence(parameters, [\"i0\", \"m\", \"v0\"])\n\n return IComponent.Diode(node1, node2, parameters[\"i0\"],\n parameters[\"m\"], parameters[\"v0\"], name)", "def casdetude_dinardo():\n file_path = PROJECT_PATH + \"/geographycal_data/Monterusciello/MontEdo_buildings\"\n router = Router(building_file=file_path)\n\n router.design_aqueduct(0)\n\n router.solve(router.acqueduct)\n minimal = router.design_minimal_aqueduct(router.acqueduct, \"Q*H\")\n kpi_calculator(minimal)\n\n print(\"N H Z P\")\n for i, (node, datadict) in enumerate(router.acqueduct.nodes.items()):\n print(i, round(datadict[\"H\"]), round(datadict[\"ELEVATION\"]), round(datadict[\"H\"] - datadict[\"ELEVATION\"]))\n\n\n router.write2shp(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\")\n router.write2epanet(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\")", "def test_calculate_deuces(self):\n\n board = ['4h', '5h', '8h', '7h', '9c']\n hand = ['2s', '3s']\n\n deuces = calculate_deuces(board, hand)\n\n self.assertEqual(deuces[0], 7414)\n self.assertEqual(deuces[1], 'High Card')", "def __call__(self, node):\n if not node.children: return;\n if len(node.children) <= 2: return;\n if self.IsGoodTriple(node.children): return;\n if len(node.children) >= 8: raise ValueError(\"Too long to decompose\");\n children = map(lambda x : [self.GetLabel(x)], node.children);\n #print \"Guessing %s\" % children;\n print node.ToPrettyString();\n res = self.path_finder.FindPath(children, self.GetLabel(node));\n if len(res) != 0:\n print res[0];\n tnodes, count = self.Transform(res[0][1], node, 0);\n node.children = tnodes.children;\n else:\n raise ValueError(\"Find no production chains to decompose for %s\" % children);\n print node.ToPrettyString();", "def generate_input(self, run=True):\n\n if (self.qm['ddec_version'] != 6) and (self.qm['ddec_version'] != 3):\n append_to_log(message='Invalid or unsupported DDEC version given, running with default version 6.',\n msg_type='warning')\n self.qm['ddec_version'] = 6\n\n # Write the charges job file.\n with open('job_control.txt', 'w+') as charge_file:\n\n charge_file.write(f'<input filename>\\n{self.molecule.name}.wfx\\n</input filename>')\n\n 
charge_file.write('\\n\\n<net charge>\\n0.0\\n</net charge>')\n\n charge_file.write('\\n\\n<periodicity along A, B and C vectors>\\n.false.\\n.false.\\n.false.')\n charge_file.write('\\n</periodicity along A, B and C vectors>')\n\n charge_file.write(f'\\n\\n<atomic densities directory complete path>\\n{self.descriptions[\"chargemol\"]}/atomic_densities/')\n charge_file.write('\\n</atomic densities directory complete path>')\n\n charge_file.write(f'\\n\\n<charge type>\\nDDEC{self.qm[\"ddec_version\"]}\\n</charge type>')\n\n charge_file.write('\\n\\n<compute BOs>\\n.true.\\n</compute BOs>')\n\n # sub_run(f'psi4 input.dat -n {self.qm[\"threads\"]}', shell=True)\n # sub_run('mv Dt.cube total_density.cube', shell=True)\n\n if run:\n control_path = 'chargemol_FORTRAN_09_26_2017/compiled_binaries/linux/Chargemol_09_26_2017_linux_serial job_control.txt'\n sub_run(f'{self.descriptions[\"chargemol\"]}/{control_path}', shell=True)", "def CycleGAN(g_conv_dim=64, d_conv_dim=64, n_res_blocks=6):\n \n # Instantiate generators\n G_XtoY = Generator(conv_dim=g_conv_dim, n_res_blocks=n_res_blocks)\n G_YtoX = Generator(conv_dim=g_conv_dim, n_res_blocks=n_res_blocks)\n # Instantiate patch discriminators\n Dp_X = PatchDiscriminator(conv_dim=d_conv_dim)\n Dp_Y = PatchDiscriminator(conv_dim=d_conv_dim)\n # Instantiate global discriminators\n Dg_X = GlobalDiscriminator(conv_dim=d_conv_dim)\n Dg_Y = GlobalDiscriminator(conv_dim=d_conv_dim)\n\n # move models to GPU, if available\n cuda_available = torch.cuda.is_available()\n device = torch.device(\"cuda:0\" if cuda_available else \"cpu\")\n\n device = torch.device(device)\n G_XtoY.to(device)\n G_YtoX.to(device)\n Dp_X.to(device)\n Dp_Y.to(device)\n Dg_X.to(device)\n Dg_Y.to(device)\n\n print('Using {}.'.format(\"GPU\" if cuda_available else \"CPU\"))\n return G_XtoY, G_YtoX, Dp_X, Dp_Y, Dg_X, Dg_Y", "def concerted_unimolecular_elimination(rct_zmas, prd_zmas):\n\n # Initialize info for the returns\n ret = None, None, None, None, None\n finish_build = True\n\n # Attempt to build appropriate z-matrix\n prd_zmas, prd_gras = shifted_standard_zmas_graphs(\n prd_zmas, remove_stereo=True)\n if len(rct_zmas) == 1:\n count = 1\n while True:\n rct_zmas, rct_gras = shifted_standard_zmas_graphs(\n rct_zmas, remove_stereo=True)\n init_zma, = rct_zmas\n\n tras, _, _ = automol.graph.reac.elimination(rct_gras, prd_gras)\n if tras is not None:\n if len(tras[0]) == 1:\n tras = [tras]\n min_dist = 100.\n frm_bnd_key = None\n for tra_i in tras:\n # Get the bond formation and breaking keys\n bnd_key, = automol.graph.trans.formed_bond_keys(tra_i)\n geo = automol.zmatrix.geometry(rct_zmas[0])\n dist = automol.geom.distance(geo, *list(bnd_key))\n if dist < min_dist:\n min_dist = dist\n frm_bnd_key = bnd_key\n tra = tra_i\n brk_keys = automol.graph.trans.broken_bond_keys(tra)\n brk_bnd_key1, brk_bnd_key2 = brk_keys\n init_zma, = rct_zmas\n\n\n # Get index for migrating atom (or bond-form atom in group)\n for bnd_key in (brk_bnd_key1, brk_bnd_key2):\n if bnd_key & frm_bnd_key:\n mig_key = next(iter(bnd_key & frm_bnd_key))\n for key in frm_bnd_key:\n if key != mig_key:\n a1_idx = key\n\n # Get chain for redefining the rc1_atm1_key z-matrix entries\n _, gras = shifted_standard_zmas_graphs(\n [init_zma], remove_stereo=True)\n gra = functools.reduce(automol.graph.union, gras)\n xgr1, = automol.graph.connected_components(gra)\n atm1_neighbors = _atom_neighbor_keys(xgr1)[a1_idx]\n for idx in atm1_neighbors:\n num_keys = len(_atom_neighbor_keys(xgr1)[idx])\n if idx != mig_key and num_keys > 1:\n 
a2_idx = idx\n atm2_neighbors = _atom_neighbor_keys(xgr1)[a2_idx]\n for idx in atm2_neighbors:\n if idx not in (mig_key, a1_idx):\n a3_idx = idx\n\n mig_redef_keys = (a1_idx, a2_idx, a3_idx)\n\n # determine if the zmatrix needs to be rebuilt by x2z\n # determines if the hydrogen atom is used to define other atoms\n rebuild = False\n if any(idx > mig_key for idx in mig_redef_keys):\n rebuild = True\n\n # rebuild zmat and go through while loop again if needed\n # shift order of cartesian coords & rerun x2z to get a new zmat\n # else go to next stage\n if rebuild:\n reord_zma = reorder_zmatrix_for_migration(\n init_zma, a1_idx, mig_key)\n rct_zmas = [reord_zma]\n count += 1\n if count == 3:\n finish_build = False\n break\n else:\n rct_zma = init_zma\n finish_build = True\n break\n else:\n finish_build = False\n\n # If z-mat with good order found, finish building it\n if finish_build:\n\n # determine the new coordinates\n rct_geo = automol.zmatrix.geometry(rct_zma)\n distance = automol.geom.distance(\n rct_geo, mig_key, a1_idx)\n angle = automol.geom.central_angle(\n rct_geo, mig_key, a1_idx, a2_idx)\n dihedral = automol.geom.dihedral_angle(\n rct_geo, mig_key, a1_idx, a2_idx, a3_idx)\n # Reset the keys for the migrating H atom\n new_idxs = (a1_idx, a2_idx, a3_idx)\n key_dct = {mig_key: new_idxs}\n ts_zma = automol.zmatrix.set_keys(rct_zma, key_dct)\n\n # Reset the values in the value dict\n mig_names = automol.zmatrix.name_matrix(ts_zma)[mig_key]\n ts_zma = automol.zmatrix.set_values(\n ts_zma, {mig_names[0]: distance,\n mig_names[1]: angle,\n mig_names[2]: dihedral}\n )\n\n # standardize the ts zmat and get tors and dist coords\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n dist_coo_key = tuple(reversed(sorted(frm_bnd_key)))\n dist_name = next(coo_name for coo_name, coo_keys in coo_dct.items()\n if dist_coo_key in coo_keys)\n ts_name_dct = automol.zmatrix.standard_names(ts_zma)\n dist_name = ts_name_dct[dist_name]\n ts_zma = automol.zmatrix.standard_form(ts_zma)\n\n # Get the name of the coordinate of the other bond that is breaking\n brk_dist_name = None\n for brk_key in (brk_bnd_key1, brk_bnd_key2):\n if not brk_key.intersection(frm_bnd_key):\n brk_dist_name = automol.zmatrix.bond_key_from_idxs(\n ts_zma, brk_key)\n\n # Add second attempt to get brk_dist_name\n if brk_dist_name is None:\n brk_dist_names = [\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key1),\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key2)\n ]\n # Grab the name that is not None\n for name in brk_dist_names:\n if name is not None:\n brk_dist_name = name\n\n # get full set of potential torsional coordinates\n pot_tors_names = automol.zmatrix.torsion_coordinate_names(rct_zma)\n\n # remove the torsional coordinates that would break reaction coordinate\n gra = automol.zmatrix.graph(ts_zma, remove_stereo=True)\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n tors_names = []\n for tors_name in pot_tors_names:\n axis = coo_dct[tors_name][0][1:3]\n grp1 = [axis[1]] + (\n list(automol.graph.branch_atom_keys(gra, axis[0], axis) -\n set(axis)))\n grp2 = [axis[0]] + (\n list(automol.graph.branch_atom_keys(gra, axis[1], axis) -\n set(axis)))\n if not ((mig_key in grp1 and a1_idx in grp2) or\n (mig_key in grp2 and a1_idx in grp1)):\n tors_names.append(tors_name)\n\n # Get reactants graph\n _, rct_gras = shifted_standard_zmas_graphs(\n [rct_zma], remove_stereo=True)\n rcts_gra = automol.graph.union_from_sequence(rct_gras)\n\n brk_bnd_key1 = shift_vals_from_dummy(brk_bnd_key1, ts_zma)\n brk_bnd_key2 = 
shift_vals_from_dummy(brk_bnd_key2, ts_zma)\n brk_bnd_keys = frozenset({brk_bnd_key1, brk_bnd_key2})\n frm_bnd_key = shift_vals_from_dummy(frm_bnd_key, ts_zma)\n\n ret = ts_zma, dist_name, brk_dist_name, brk_bnd_keys, frm_bnd_key, tors_names, rcts_gra\n\n return ret", "def compile_ODEs(self):\n global dydt, ct, pool\n \n if hasattr(self, \"pool\"):\n self.pool.close()\n \n self.compute_dependencies()\n self.compute_Jacobian()\n self.derivative_components = {}\n for name in self.names_species:\n self.derivative_components[name] = compile(self.ODEs[name], \"dydt_\" + name, \"eval\")\n \n# n_processes = 8\n# barycenters = self.get_barycenters()\n# inner_cids = []\n# cids = []\n# for i in xrange(n_processes):\n# cids.append(set())\n# inner_cids.append(set())\n# \n# for cid, coords in barycenters.items():\n# bin_ = np.sum((coords > np.median(barycenters.values(), axis=0)) * (2 ** np.arange(3)))\n# inner_cids[bin_].add(cid)\n# cids[bin_].add(cid)\n# cids[bin_].update(self.mesh.border_neighbors(3, cid))\n \n n_sub_bins = 3 * self.n_bins - 2\n cids = [self.cids[np.logical_and(i - 2 <= self.bins, self.bins<= i + 2)] for i in xrange(0, n_sub_bins, 3)]\n inner_cids = [self.cids[np.logical_and(i - 1 <= self.bins, self.bins<= i + 1)] for i in xrange(0, n_sub_bins, 3)]\n# print self.bins\n# print cids\n# print inner_cids\n \n dydt = multicell.parallel.ConcentrationTableMultiprocessing(self.names_species, self.cids) \n ct = multicell.parallel.ConcentrationTableMultiprocessing(self.names_species, self.cids)\n \n pool = multiprocess.Pool(initializer=init, initargs=(dydt.rawarray, ct.rawarray, self)) \n self.pool = pool\n \n def derivative(y, t):\n global dydt, ct, pool\n # Initialization of the derivative vector\n dydt.fill(0)\n ct.import_values(y)\n ct *= (ct>0)\n \n # multiprocessing\n \n pool.map(work, [(t, cids[i], inner_cids[i]) for i in xrange(self.n_bins)])\n# print dydt\n# pool.join()\n \n result = dydt.as_1d_array()\n\n # Test\n #print len(result), len(y)\n assert len(result) == len(y), \"y and dydt are different lengths\"\n \n for name in self.names_species:\n assert not np.any(np.isnan(self.y.current().get_species(name))), \"NaN value in concentrations of %s\" % name\n assert not np.any(np.isinf(self.y.current().get_species(name))), \"Inf value in concentrations of %s\" % name\n \n return result\n \n self.derivative = derivative", "def derivative(polinom):\n l = []\n for i in range(1, polinom.degree+1):\n l.append(polinom.coefficients[i]*i)\n return Polinom(l)", "def deduce(fig, pos, validgroups, groups, telegram):\n for _, l in validgroups:\n used = [False] * SPACES\n usedcols = [False] * COLNUM\n for i in range(4):\n fig[l[i][0]][l[i][1]] = pos[i]\n fill_col(fig, l[i][0], l[i][1], pos[i], used, usedcols)\n for x in filter(lambda x: isinstance(x, int), fig[l[i][0]]):\n used[x] = True\n todo_flag = True\n while todo_flag:\n todo_flag = False\n queue = []\n for _, group in groups:\n ps = filter(lambda (x, y): isinstance(fig[x][y], int), group)\n if len(ps) == 3:\n missing = [i for (x, y), i in zip(group, count()) if not isinstance(fig[x][y], int)][0]\n if missing == 3:\n z = checksum(*[telegram[fig[x][y]] for (x, y) in ps])\n else:\n z = missing_symbol(*[telegram[fig[x][y]] for (x, y) in ps])\n queue.append((group[missing], z))\n while queue:\n for ((x, y), z) in queue:\n candidates = [n for n in range(SPACES) if telegram[n] == z]\n na = symbol_space(f[x][:y])\n nb = symbol_space(f[x][y + 1:])\n candidates = filter(lambda n: valid_symbol(n, na, nb, used, usedcols), candidates)\n if 
len(candidates) == 1:\n fig[x][y] = candidates[0]\n fill_col(fig, x, y, candidates[0], used, usedcols)\n todo_flag = True\n break\n else:\n break\n print_fig(fig)\n if any([' ' in col for col in fig]):\n print \"Figure incomplete\"\n else:\n print \"Permutation found!\", find_permutation(fig)\n break", "def clean_up(molecules, input, output, procedure, energy = None):\n # It is assumed that the procedure and molecules are validated\n # Remove the Molecules' input and output structures\n for mol in molecules:\n try:\n os.remove(mol.generate_name(\"pre\" + procedure, \"CHARMM\"))\n except OSError:\n pass\n # Energy functions don't make structure outputs, so don't try to remove\n # them if they were never made\n if energy != None:\n continue\n try:\n os.remove(mol.generate_name(\"post\" + procedure, \"CHARMM\"))\n except OSError:\n pass\n # Move the input, output, and possibly energy files\n os.rename(input, \"charmm.inp\")\n os.rename(output, \"charmm.out\")\n if energy != None:\n os.rename(energy, \"charmm_energy.txt\")", "def get_decomposition(self):\n raise NotImplementedError('this should be implemented by a subclass')", "async def my_test_dff(dut):\n\n #### Create and start clock with concurrent coroutine operation\n # Clock with 50% duty cycle and a period of 10ps\n cocotb.fork(Clock(dut.clk, 10, \"ps\").start())\n\n # Syncronize with the clock\n await RisingEdge(dut.clk)\n\n #### Generate transactions\n # In this case, all possible combinations in every consecutive order \n # 2 inputs (D and rstN) = 4 possible binary combinations (00, 01, 10, 11) => 2^4 = 16 possible combinations in every consecutive order\n # Declare the number of inputs\n num_of_inputs = 2\n # Create a list of permutations for those two inputs (list: [(0, 1), (1, 0)])\n transactions = list(permutations(range(num_of_inputs), 2))\n # Permutations do not account for repeat value combinations; so add those in to get (list: [(0, 1), (1, 0), [0, 0], [1, 1]]) the 4 possible binary combinations\n for i in range(num_of_inputs):\n transactions.append([i, i])\n # Create a list of permutations on top of the list of permutations to account for the \"in every consecutive order\" part\n transactions = list(permutations(transactions, 2))\n # Again, we must add in the missed repeat value combinations; there were 4 missed this time instead of the 2 above\n for i in range(num_of_inputs):\n transactions.append(([i, i], [i, i]))\n if i == 1:\n transactions.append(([i, 0], [i, 0]))\n transactions.append(([0, i], [0, i]))\n\n # Run the simulation with the transactions generated\n for i in range(len(transactions)):\n\n # Assign the stimulus to the DUT's ports\n dut.D <= transactions[i][0][0]\n dut.rstN <= transactions[i][0][1]\n \n # Simulate some small time (less than half the period) for the random integers to reach the DUT's input ports\n await Timer(1, \"ps\")\n #print(f\"The D input: {dut.D.value}\")\n #print(f\"The rstN input: {dut.rstN.value}\")\n \n # Detect the falling edge of clock \n await FallingEdge(dut.clk)\n\n # Simulate some small time (less than half the period) for the output to update accordingly after the falling edge (aka if reset is low)\n await Timer(1, \"ps\")\n #print(f\"The output after the falling edge: {dut.Q.value}\")\n\n # Detect the rising edge of clock\n await RisingEdge(dut.clk)\n\n # Simulate some small time (less than half the period) for the output to update accordingly after the rising edge (aka if \"D\" is different than \"Q\")\n await Timer(1, \"ps\")\n #print(f\"The output after the rising edge: 
{dut.Q.value}\")\n\n # Assert an error message and stop simulation if the output does not match the model's output\n assert dut.Q.value == my_dff_model(transactions[i][0][0], transactions[i][0][1]), f\"Failure: Transaction - {transactions[i][0]} failed!\"\n\n #### There is a double simulation per \"for\" loop because of how the transaction was built \n # Assign the stimulus to the DUT's ports\n dut.D <= transactions[i][1][0]\n dut.rstN <= transactions[i][1][1]\n\n #Simulate some small time (less than half the period) for the random integers to reach the DUT's input ports\n await Timer(1, \"ps\")\n #print(f\"The D input: {dut.D.value}\")\n #print(f\"The rstN input: {dut.rstN.value}\")\n\n # Detect the falling edge of clock\n await FallingEdge(dut.clk)\n\n # Simulate some small time (less than half the period) for the output to update accordingly after the falling edge (aka if reset is low)\n await Timer(1, \"ps\")\n #print(f\"The output after the falling edge: {dut.Q.value}\")\n\n # Detect the rising edge of clock\n await RisingEdge(dut.clk)\n\n # Simulate some small time (less than half the period) for the output to update accordingly after the rising edge (aka if \"D\" is different than \"Q\")\n await Timer(1, \"ps\")\n #print(f\"The output after the rising edge: {dut.Q.value}\")\n\n\n assert dut.Q.value == my_dff_model(transactions[i][1][0], transactions[i][1][1]), f\"Failure: Transaction - {transactions[i][1]} failed!\"", "def demodulate(Z, over_space=True, depth=1):\n\n #do complex PCA on each IMF\n N,T = Z.shape\n\n if over_space:\n\n #construct a matrix with the real and imaginary parts separated\n X = np.zeros([2*N, T], dtype='float')\n X[:N, :] = Z.real\n X[N:, :] = Z.imag\n\n pca = PCA()\n pca.fit(X.T)\n\n complex_pcs = np.zeros([N, N], dtype='complex')\n for j in range(N):\n pc = pca.components_[j, :]\n complex_pcs[j, :].real = pc[:N]\n complex_pcs[j, :].imag = pc[N:]\n\n phase = np.angle(Z)\n for k in range(depth):\n #compute the kth PC projected component\n proj = np.dot(Z.T.squeeze(), complex_pcs[k, :].squeeze())\n phase -= np.angle(proj)\n\n else:\n\n first_pc = np.zeros([T], dtype='complex')\n\n pca_real = PCA(n_components=1, svd_solver=\"randomized\")\n pca_real.fit(Z.real)\n print('pca_real.components_.shape=',pca_real.components_.shape)\n first_pc.real = pca_real.components_.squeeze()\n \n pca_imag = PCA(n_components=1, svd_solver=\"randomized\")\n pca_imag.fit(Z.imag)\n print('pca_imag.components_.shape=',pca_imag.components_.shape)\n first_pc.imag = pca_imag.components_.squeeze()\n\n complex_pcs = np.array([first_pc])\n\n proj = first_pc\n\n #demodulate the signal\n phase = np.angle(Z) - np.angle(proj)\n\n return phase,complex_pcs", "def build_sequences(dcm):\n dimension_organization_uid = '1.2.276.0.7230010.3.1.4.8323329.20175.1573232544.237437'\n ds0 = Dataset()\n ds0.DimensionOrganizationUID = dimension_organization_uid\n dcm.DimensionOrganizationSequence = Sequence([ds0])\n del ds0\n\n ds1 = Dataset()\n ds1.DimensionOrganizationUID = dimension_organization_uid\n ds1.DimensionIndexPointer = Tag(0x0048021E)\n ds1.FunctionalGroupPointer = Tag(0x0048021A)\n\n ds2 = Dataset()\n ds2.DimensionOrganizationUID = dimension_organization_uid\n ds2.DimensionIndexPointer = Tag(0x0048021F)\n ds2.FunctionalGroupPointer = Tag(0x0048021A)\n\n dcm.DimensionIndexSequence = Sequence([ds1, ds2])\n del ds1, ds2\n\n ds3 = Dataset()\n ds3.XOffsetInSlideCoordinateSystem = 20\n ds3.YOffsetInSlideCoordinateSystem = 40\n dcm.TotalPixelMatrixOriginSequence = Sequence([ds3])\n del ds3\n\n ds4 = 
Dataset()\n ds5 = Dataset()\n\n # IlluminationTypeCodeSequence\n ds4.CodingSchemeDesignator = 'DCM'\n ds4.CodeMeaning = 'Brightfield illumination'\n ds4.CodeValue = '111744'\n\n # IlluminationColorCodeSequence\n ds5.CodingSchemeDesignator = 'DCM'\n ds5.CodeMeaning = 'No filter'\n ds5.CodeValue = '111609'\n\n ds7 = Dataset()\n ds7.IlluminationTypeCodeSequence = Sequence([ds4])\n ds7.IlluminationColorCodeSequence = Sequence([ds5])\n # noinspection PyPep8,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection\n ds7.ICCProfile = b'\\x00\\x00\\x1b\\nlcms\\x020\\x00\\x00mntrRGB XYZ \\x07\\xd4\\x00\\x08\\x00\\r\\x00\\x0c\\x00\\x12\\x00\\x06acspMSFT\\x00\\x00\\x00\\x00lcms\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xf6\\xd6\\x00\\x01\\x00\\x00\\x00\\x00\\xd3-lcms\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0cdmnd\\x00\\x00\\x01\\x14\\x00\\x00\\x00jdesc\\x00\\x00\\x01\\x80\\x00\\x00\\x00hdmdd\\x00\\x00\\x01\\xe8\\x00\\x00\\x00hwtpt\\x00\\x00\\x02P\\x00\\x00\\x00\\x14rXYZ\\x00\\x00\\x02d\\x00\\x00\\x00\\x14bXYZ\\x00\\x00\\x02x\\x00\\x00\\x00\\x14gXYZ\\x00\\x00\\x02\\x8c\\x00\\x00\\x00\\x14rTRC\\x00\\x00\\x02\\xa0\\x00\\x00\\x08\\x0cgTRC\\x00\\x00\\n\\xac\\x00\\x00\\x08\\x0cbTRC\\x00\\x00\\x12\\xb8\\x00\\x00\\x08\\x0cchrm\\x00\\x00\\x1a\\xc4\\x00\\x00\\x00$cprt\\x00\\x00\\x1a\\xe8\\x00\\x00\\x00!desc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10lcms generated \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00desc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x05sRGB\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00desc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x05sRGB\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00XYZ \\x00\\x00\\x00\\x00\\x00\\x00\\xf3=\\x00\\x01\\x00\\x00\\x00\\x01\\x16\\x98XYZ \\x00\\x00\\x00\\x00\\x00\\x00o\\x94\\x00\\x008\\xee\\x00\\x00\\x03\\x90XYZ \\x00\\x00\\x00\\x00\\x00\\x00$\\x9d\\x00\\x00\\x0f\\x83\\x00\\x00\\xb6\\xbeXYZ 
\\x00\\x00\\x00\\x00\\x00\\x00b\\xa5\\x00\\x00\\xb7\\x90\\x00\\x00\\x18\\xdecurv\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x05\\x00\\n\\x00\\x0f\\x00\\x14\\x00\\x19\\x00\\x1e\\x00#\\x00(\\x00-\\x002\\x007\\x00;\\x00@\\x00E\\x00J\\x00O\\x00T\\x00Y\\x00^\\x00c\\x00h\\x00m\\x00r\\x00w\\x00|\\x00\\x81\\x00\\x86\\x00\\x8b\\x00\\x90\\x00\\x95\\x00\\x9a\\x00\\x9f\\x00\\xa4\\x00\\xa9\\x00\\xae\\x00\\xb2\\x00\\xb7\\x00\\xbc\\x00\\xc1\\x00\\xc6\\x00\\xcb\\x00\\xd0\\x00\\xd5\\x00\\xdb\\x00\\xe0\\x00\\xe5\\x00\\xeb\\x00\\xf0\\x00\\xf6\\x00\\xfb\\x01\\x01\\x01\\x07\\x01\\r\\x01\\x13\\x01\\x19\\x01\\x1f\\x01%\\x01+\\x012\\x018\\x01>\\x01E\\x01L\\x01R\\x01Y\\x01`\\x01g\\x01n\\x01u\\x01|\\x01\\x83\\x01\\x8b\\x01\\x92\\x01\\x9a\\x01\\xa1\\x01\\xa9\\x01\\xb1\\x01\\xb9\\x01\\xc1\\x01\\xc9\\x01\\xd1\\x01\\xd9\\x01\\xe1\\x01\\xe9\\x01\\xf2\\x01\\xfa\\x02\\x03\\x02\\x0c\\x02\\x14\\x02\\x1d\\x02&\\x02/\\x028\\x02A\\x02K\\x02T\\x02]\\x02g\\x02q\\x02z\\x02\\x84\\x02\\x8e\\x02\\x98\\x02\\xa2\\x02\\xac\\x02\\xb6\\x02\\xc1\\x02\\xcb\\x02\\xd5\\x02\\xe0\\x02\\xeb\\x02\\xf5\\x03\\x00\\x03\\x0b\\x03\\x16\\x03!\\x03-\\x038\\x03C\\x03O\\x03Z\\x03f\\x03r\\x03~\\x03\\x8a\\x03\\x96\\x03\\xa2\\x03\\xae\\x03\\xba\\x03\\xc7\\x03\\xd3\\x03\\xe0\\x03\\xec\\x03\\xf9\\x04\\x06\\x04\\x13\\x04 \\x04-\\x04;\\x04H\\x04U\\x04c\\x04q\\x04~\\x04\\x8c\\x04\\x9a\\x04\\xa8\\x04\\xb6\\x04\\xc4\\x04\\xd3\\x04\\xe1\\x04\\xf0\\x04\\xfe\\x05\\r\\x05\\x1c\\x05+\\x05:\\x05I\\x05X\\x05g\\x05w\\x05\\x86\\x05\\x96\\x05\\xa6\\x05\\xb5\\x05\\xc5\\x05\\xd5\\x05\\xe5\\x05\\xf6\\x06\\x06\\x06\\x16\\x06\\'\\x067\\x06H\\x06Y\\x06j\\x06{\\x06\\x8c\\x06\\x9d\\x06\\xaf\\x06\\xc0\\x06\\xd1\\x06\\xe3\\x06\\xf5\\x07\\x07\\x07\\x19\\x07+\\x07=\\x07O\\x07a\\x07t\\x07\\x86\\x07\\x99\\x07\\xac\\x07\\xbf\\x07\\xd2\\x07\\xe5\\x07\\xf8\\x08\\x0b\\x08\\x1f\\x082\\x08F\\x08Z\\x08n\\x08\\x82\\x08\\x96\\x08\\xaa\\x08\\xbe\\x08\\xd2\\x08\\xe7\\x08\\xfb\\t\\x10\\t%\\t:\\tO\\td\\ty\\t\\x8f\\t\\xa4\\t\\xba\\t\\xcf\\t\\xe5\\t\\xfb\\n\\x11\\n\\'\\n=\\nT\\nj\\n\\x81\\n\\x98\\n\\xae\\n\\xc5\\n\\xdc\\n\\xf3\\x0b\\x0b\\x0b\"\\x0b9\\x0bQ\\x0bi\\x0b\\x80\\x0b\\x98\\x0b\\xb0\\x0b\\xc8\\x0b\\xe1\\x0b\\xf9\\x0c\\x12\\x0c*\\x0cC\\x0c\\\\\\x0cu\\x0c\\x8e\\x0c\\xa7\\x0c\\xc0\\x0c\\xd9\\x0c\\xf3\\r\\r\\r&\\r@\\rZ\\rt\\r\\x8e\\r\\xa9\\r\\xc3\\r\\xde\\r\\xf8\\x0e\\x13\\x0e.\\x0eI\\x0ed\\x0e\\x7f\\x0e\\x9b\\x0e\\xb6\\x0e\\xd2\\x0e\\xee\\x0f\\t\\x0f%\\x0fA\\x0f^\\x0fz\\x0f\\x96\\x0f\\xb3\\x0f\\xcf\\x0f\\xec\\x10\\t\\x10&\\x10C\\x10a\\x10~\\x10\\x9b\\x10\\xb9\\x10\\xd7\\x10\\xf5\\x11\\x13\\x111\\x11O\\x11m\\x11\\x8c\\x11\\xaa\\x11\\xc9\\x11\\xe8\\x12\\x07\\x12&\\x12E\\x12d\\x12\\x84\\x12\\xa3\\x12\\xc3\\x12\\xe3\\x13\\x03\\x13#\\x13C\\x13c\\x13\\x83\\x13\\xa4\\x13\\xc5\\x13\\xe5\\x14\\x06\\x14\\'\\x14I\\x14j\\x14\\x8b\\x14\\xad\\x14\\xce\\x14\\xf0\\x15\\x12\\x154\\x15V\\x15x\\x15\\x9b\\x15\\xbd\\x15\\xe0\\x16\\x03\\x16&\\x16I\\x16l\\x16\\x8f\\x16\\xb2\\x16\\xd6\\x16\\xfa\\x17\\x1d\\x17A\\x17e\\x17\\x89\\x17\\xae\\x17\\xd2\\x17\\xf7\\x18\\x1b\\x18@\\x18e\\x18\\x8a\\x18\\xaf\\x18\\xd5\\x18\\xfa\\x19 \\x19E\\x19k\\x19\\x91\\x19\\xb7\\x19\\xdd\\x1a\\x04\\x1a*\\x1aQ\\x1aw\\x1a\\x9e\\x1a\\xc5\\x1a\\xec\\x1b\\x14\\x1b;\\x1bc\\x1b\\x8a\\x1b\\xb2\\x1b\\xda\\x1c\\x02\\x1c*\\x1cR\\x1c{\\x1c\\xa3\\x1c\\xcc\\x1c\\xf5\\x1d\\x1e\\x1dG\\x1dp\\x1d\\x99\\x1d\\xc3\\x1d\\xec\\x1e\\x16\\x1e@\\x1ej\\x1e\\x94\\x1e\\xbe\\x1e\\xe9\\x1f\\x13\\x1f>\\x1fi\\x1f\\x94\\x1f\\xbf\\x1f\\xea \\x15 A l \\x98 \\xc4 
[... repeated 'curv' tone-curve tables of the embedded ICC colour-profile binary omitted ...]
\\x94\\x8a\\x94\\xf4\\x95_\\x95\\xc9\\x964\\x96\\x9f\\x97\\n\\x97u\\x97\\xe0\\x98L\\x98\\xb8\\x99$\\x99\\x90\\x99\\xfc\\x9ah\\x9a\\xd5\\x9bB\\x9b\\xaf\\x9c\\x1c\\x9c\\x89\\x9c\\xf7\\x9dd\\x9d\\xd2\\x9e@\\x9e\\xae\\x9f\\x1d\\x9f\\x8b\\x9f\\xfa\\xa0i\\xa0\\xd8\\xa1G\\xa1\\xb6\\xa2&\\xa2\\x96\\xa3\\x06\\xa3v\\xa3\\xe6\\xa4V\\xa4\\xc7\\xa58\\xa5\\xa9\\xa6\\x1a\\xa6\\x8b\\xa6\\xfd\\xa7n\\xa7\\xe0\\xa8R\\xa8\\xc4\\xa97\\xa9\\xa9\\xaa\\x1c\\xaa\\x8f\\xab\\x02\\xabu\\xab\\xe9\\xac\\\\\\xac\\xd0\\xadD\\xad\\xb8\\xae-\\xae\\xa1\\xaf\\x16\\xaf\\x8b\\xb0\\x00\\xb0u\\xb0\\xea\\xb1`\\xb1\\xd6\\xb2K\\xb2\\xc2\\xb38\\xb3\\xae\\xb4%\\xb4\\x9c\\xb5\\x13\\xb5\\x8a\\xb6\\x01\\xb6y\\xb6\\xf0\\xb7h\\xb7\\xe0\\xb8Y\\xb8\\xd1\\xb9J\\xb9\\xc2\\xba;\\xba\\xb5\\xbb.\\xbb\\xa7\\xbc!\\xbc\\x9b\\xbd\\x15\\xbd\\x8f\\xbe\\n\\xbe\\x84\\xbe\\xff\\xbfz\\xbf\\xf5\\xc0p\\xc0\\xec\\xc1g\\xc1\\xe3\\xc2_\\xc2\\xdb\\xc3X\\xc3\\xd4\\xc4Q\\xc4\\xce\\xc5K\\xc5\\xc8\\xc6F\\xc6\\xc3\\xc7A\\xc7\\xbf\\xc8=\\xc8\\xbc\\xc9:\\xc9\\xb9\\xca8\\xca\\xb7\\xcb6\\xcb\\xb6\\xcc5\\xcc\\xb5\\xcd5\\xcd\\xb5\\xce6\\xce\\xb6\\xcf7\\xcf\\xb8\\xd09\\xd0\\xba\\xd1<\\xd1\\xbe\\xd2?\\xd2\\xc1\\xd3D\\xd3\\xc6\\xd4I\\xd4\\xcb\\xd5N\\xd5\\xd1\\xd6U\\xd6\\xd8\\xd7\\\\\\xd7\\xe0\\xd8d\\xd8\\xe8\\xd9l\\xd9\\xf1\\xdav\\xda\\xfb\\xdb\\x80\\xdc\\x05\\xdc\\x8a\\xdd\\x10\\xdd\\x96\\xde\\x1c\\xde\\xa2\\xdf)\\xdf\\xaf\\xe06\\xe0\\xbd\\xe1D\\xe1\\xcc\\xe2S\\xe2\\xdb\\xe3c\\xe3\\xeb\\xe4s\\xe4\\xfc\\xe5\\x84\\xe6\\r\\xe6\\x96\\xe7\\x1f\\xe7\\xa9\\xe82\\xe8\\xbc\\xe9F\\xe9\\xd0\\xea[\\xea\\xe5\\xebp\\xeb\\xfb\\xec\\x86\\xed\\x11\\xed\\x9c\\xee(\\xee\\xb4\\xef@\\xef\\xcc\\xf0X\\xf0\\xe5\\xf1r\\xf1\\xff\\xf2\\x8c\\xf3\\x19\\xf3\\xa7\\xf44\\xf4\\xc2\\xf5P\\xf5\\xde\\xf6m\\xf6\\xfb\\xf7\\x8a\\xf8\\x19\\xf8\\xa8\\xf98\\xf9\\xc7\\xfaW\\xfa\\xe7\\xfbw\\xfc\\x07\\xfc\\x98\\xfd)\\xfd\\xba\\xfeK\\xfe\\xdc\\xffm\\xff\\xffchrm\\x00\\x00\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x00\\xa3\\xd7\\x00\\x00T{\\x00\\x00L\\xcd\\x00\\x00\\x99\\x9a\\x00\\x00&f\\x00\\x00\\x0f\\\\text\\x00\\x00\\x00\\x00no copyright, use freely\\x00\\n'\n ds7.OpticalPathIdentifier = '1'\n # noinspection SpellCheckingInspection\n ds7.OpticalPathDescription = 'Brightfield'\n\n dcm.OpticalPathSequence = Sequence([ds7])\n del ds7, ds5, ds4\n\n dcm.AcquisitionContextSequence = Sequence([])\n\n ds0 = Dataset()\n ds0.LocalNamespaceEntityID = 'UNKNOWN'\n dcm.IssuerOfTheContainerIdentifierSequence = Sequence([ds0])\n del ds0\n\n ds0 = Dataset()\n\n ds0.SpecimenIdentifier = 'UNKNOWN'\n ds0.SpecimenPreparationSequence = Sequence([])\n ds0.SpecimenUID = generate_uid(prefix=None)\n ds0.IssuerOfTheSpecimenIdentifierSequence = Sequence([])\n dcm.SpecimenDescriptionSequence = Sequence([ds0])\n dcm.ContainerTypeCodeSequence = Sequence([])\n dcm.ContainerIdentifier = 'UNKNOWN'\n return dcm", "def test_print_complex_dipole(self):\n res = ElectronicStructureResult()\n res.computed_energies = np.asarray([1.0])\n res.nuclear_dipole_moment = (0.0, 0.0, 1.0)\n res.computed_dipole_moment = [(0.0, 0.0, 1.0j)]\n res.extracted_transformer_dipoles = [{}]\n self.expected = \"\"\"\\\n === GROUND STATE ENERGY ===\n\n * Electronic ground state energy (Hartree): 1.\n - computed part: 1.\n\n === DIPOLE MOMENTS ===\n\n ~ Nuclear dipole moment (a.u.): [0.0 0.0 1.]\n\n 0:\n * Electronic dipole moment (a.u.): [0.0 0.0 0.0+1.j]\n - computed part: [0.0 0.0 0.0+1.j]\n > Dipole moment (a.u.): [0.0 0.0 1.+1.j] Total: 1.+1.j\n (debye): [0.0 0.0 2.54174623+2.54174623j] Total: 2.54174623+2.54174623j\n \"\"\"\n self._assert_printed_result(res)", "def 
__discriminator(self, inp, reuse_variables=None):\n with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables) as scope:\n nodes_input = 1\n for i in range(len(self.arch_D)):\n nodes_output = self.arch_D[i]\n inp = fc_layer(inp, nodes_input, nodes_output, 'D_' + str(i + 1) + '_')\n nodes_input = self.arch_D[i]\n\n return fc_layer(inp, self.arch_D[-1], 2,\n 'D_end_',\n final_layer=True)", "def run(self, dag):\n self.property_set[\"commutation_set\"] = defaultdict(list)\n pending_1q = [list() for _ in range(dag.num_qubits())]\n block_id = [-(i + 1) for i in range(dag.num_qubits())]\n current_id = 0\n block_list = list()\n to_qid = dict()\n for i, qubit in enumerate(dag.qubits):\n to_qid[qubit] = i\n for node in dag.topological_op_nodes():\n qids = [to_qid[q] for q in node.qargs]\n if (\n not isinstance(node.op, Gate)\n or len(qids) > 2\n or node.op.condition\n or node.op.is_parameterized()\n ):\n for qid in qids:\n if block_id[qid] > 0:\n block_list[block_id[qid]].extend(pending_1q[qid])\n block_id[qid] = -(qid + 1)\n pending_1q[qid].clear()\n continue\n\n if len(qids) == 1:\n b_id = block_id[qids[0]]\n if b_id < 0:\n pending_1q[qids[0]].append(node)\n else:\n block_list[b_id].append(node)\n elif block_id[qids[0]] == block_id[qids[1]]:\n block_list[block_id[qids[0]]].append(node)\n else:\n block_id[qids[0]] = current_id\n block_id[qids[1]] = current_id\n new_block = list()\n if pending_1q[qids[0]]:\n new_block.extend(pending_1q[qids[0]])\n pending_1q[qids[0]].clear()\n if pending_1q[qids[1]]:\n new_block.extend(pending_1q[qids[1]])\n pending_1q[qids[1]].clear()\n new_block.append(node)\n block_list.append(new_block)\n current_id += 1\n\n self.property_set[\"block_list\"] = [tuple(block) for block in block_list]\n return dag", "def _get_distances(self):\n for molecule in self.values():\n molecule.get_distances()\n\n # for atom in self.atoms:\n # atom.get_distances()", "def forward(self, X): \n decods = torch.zeros(self.batch_size, self.m, 1, dtype=torch.int)\n for i in range(self.batch_size):\n # Reshape the word to (14,1,16,8)\n word = X[i].reshape(self.m, 1, self.input_dim[0],self.input_dim[1])\n # conv operation performed for one word independently to every letter\n features = self.get_conv_features(word)\n # now decode the sequence using conv features\n decods[i] = self.dp_infer(features)\n\n return decods", "def make_all_cosmos_des(run, cosmos_config, des_config, catfile, tileid):\n\n flist = files.get_cosmos_flist(tileid)\n cosmos_meds = files.get_meds_file(run, tileid, 'cosmos','i')\n\n print('making cosmos MEDS:',cosmos_meds)\n maker = CosmosMEDSMaker(\n config_path=cosmos_config,\n catname=catfile,\n flistname=flist,\n )\n maker.write(cosmos_meds)\n\n for band in ['u','g','r','i','z']:\n\n band_flist = files.get_des_flist(band)\n band_meds = files.get_meds_file(run, tileid, 'des',band)\n\n print('making DES MEDS:',band_meds)\n maker = CosmosMEDSMaker(\n config_path=des_config,\n catname=cosmos_meds,\n flistname=band_flist,\n )\n maker.write(band_meds)", "def removeDegeneracy(self, caliStep=-1, threshold=0.5):\n effIDs = np.where(self.posterior[:, caliStep] < threshold)[0]\n self.proposal = self.proposal[effIDs, :]\n self.likelihood = self.likelihood[effIDs, :]\n self.posterior = self.posterior[effIDs, :]\n self.smcSamples[0] = self.smcSamples[0][effIDs, :]\n self.yadeData = self.yadeData[:, effIDs, :]\n self.numSamples = len(effIDs)\n for i in range(self.numSteps):\n self.likelihood[:, i], self.posterior[:, i], \\\n self.ips[:, i], self.covs[:, i] = 
self.recursiveBayesian(i, self.proposal[:, i])", "def get_molecule_dict(chemfile):\n molecule_dict={}\n with open(chemfile,'r') as f:\n for line in f:\n line=line.strip().split('\\t')\n ikey=line[0]\n smi=line[1]\n mol = Chem.MolFromSmiles(smi)\n if not mol:\n raise ValueError(\"Could not generate Mol from SMILES string:\", smi)\n #Chem.SanitizeMol(mol)\n\n atoms={} #atom_idx -> atom features\n bonds={} #bond_idx -> bond features\n atoms2bond={} #(atom_idx1,atom_idx2) -> bond_idx\n \n nodes_by_degree = {d: [] for d in degrees}\n for atom in mol.GetAtoms():\n atom_feature = atom_features(atom)\n atom_id = smi+str(atom.GetIdx())\n atoms[atom.GetIdx()]=atom_feature \n atom_neighbors={aid: [] for aid in atoms.keys()} #atom_idx -> neighbor atom idxs\n bond_neighbors={aid: [] for aid in atoms.keys()} #atom_idx -> neighbor bond idxs\n\n for bond in mol.GetBonds():\n src_atom_idx = bond.GetBeginAtom().GetIdx()\n tgt_atom_idx = bond.GetEndAtom().GetIdx()\n bond_idx = bond.GetIdx()\n bond_neighbors[src_atom_idx].append(bond_idx)\n bond_neighbors[tgt_atom_idx].append(bond_idx)\n bond_feature = bond_features(bond)\n bonds[bond.GetIdx()] = bond_feature\n atom_neighbors[src_atom_idx].append(tgt_atom_idx)\n atom_neighbors[tgt_atom_idx].append(src_atom_idx)\n atoms2bond[(src_atom_idx,tgt_atom_idx)]=bond_idx\n atoms2bond[(tgt_atom_idx,src_atom_idx)]=bond_idx\n \n atoms_by_degree={d: [] for d in degrees}\n bonds_by_degree={d: [] for d in degrees}\n for aid in atom_neighbors:\n neighbor_atoms = atom_neighbors[aid]\n d = len(neighbor_atoms) #degree of the atom\n atoms_by_degree[d].append(aid) #current atom is degree=d\n neighbor_bonds=[]\n for neighbor in neighbor_atoms:\n bond_idx=atoms2bond[(aid,neighbor)]\n neighbor_bonds.append(bond_idx)\n bonds_by_degree[d].append(neighbor_bonds)\n\n neighbor_by_degree = []\n for degree in degrees:\n neighbor_by_degree.append({\n 'atom': atoms_by_degree[degree],\n 'bond': bonds_by_degree[degree]\n })\n \n molecule_dict[ikey]={'smiles':str(smi),\n 'neighbor_by_degree':neighbor_by_degree,\n 'atoms':atoms,'bonds':bonds,\n 'atom_neighbor':atom_neighbors,\n 'bond_neighbor':bond_neighbors}\n return molecule_dict", "def dft(self, candidates, mutations, ind):\n kmer = self.nodes[ind][0]\n candidates, mutations = self.prune(candidates, mutations, kmer)\n if len(candidates) == 0:\n return\n if len(self.nodes[ind][1]) == 0:\n Y = np.zeros((len(self.observed), 1))\n Y[candidates, 0] = 1\n n_alphas1 = self.X1 @ Y\n n_alphas2 = self.X2 @ Y\n self.K += n_alphas1 @ n_alphas2.T\n self.K11 += n_alphas1 ** 2\n self.K22 += n_alphas2 ** 2\n for e in self.nodes[ind][1]:\n self.dft(candidates.copy(), mutations.copy(), e)", "def dumpDis(mol, fileName='distances.txt', delta=0.5, atomPat='*.H*',maxDis=4.5,prob=1.1,fixLower=0.0):\n\n mol.selectAtoms(atomPat)\n pairs = mol.getDistancePairs(maxDis,False)\n with open(fileName,'w') as fOut:\n for pair in pairs:\n if prob < 1.0:\n r = random.random()\n if r > prob:\n continue\n (atom1,atom2,distance) = pair.toString().split()\n (res1,aname1) = atom1[2:].split('.')\n (res2,aname2) = atom2[2:].split('.')\n atom1 = res1+'.'+aname1\n atom2 = res2+'.'+aname2\n distance = float(distance)\n if res1 != res2:\n upper = distance + delta\n if fixLower > 1.0:\n lower = fixLower\n else:\n lower = distance - delta\n outStr = \"%s %s %.1f %.1f\\n\" % (atom1,atom2,lower,upper)\n fOut.write(outStr)", "def listDegenerate(self):\n return arange(self.nelems())[self.testDegenerate()]", "def make_random_supercontrolled_decomposer(self, seed):\n state = 
np.random.default_rng(seed)\n basis_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_phase = state.random() * 2 * np.pi\n basis_b = state.random() * np.pi / 4\n basis_unitary = np.exp(1j * basis_phase) * basis_k1 @ Ud(np.pi / 4, basis_b, 0) @ basis_k2\n decomposer = TwoQubitBasisDecomposer(UnitaryGate(basis_unitary))\n return decomposer", "def generate_data(input_file):\n \n mol_mass_list = []\n inchi_list = []\n SMILES_list = []\n identifier_list = []\n inchi_key1_list = [] \n inchi_key2_list = [] \n mol_formula_list = []\n NA_list = []\n \n pre_SMILES_list = []\n identifier_list = []\n all_lines = input_file.split('\\n')\n if all_lines[-1] == '':\n all_lines = all_lines[:-1]\n for line in all_lines:\n line = line.split('\\t')\n\n #Convert to mol and remove invalid structures \n smile_string = ''\n id_string = ''\n m = line[0]\n id_name = line[1]\n mol = Chem.MolFromSmiles(m)\n if mol != None:\n smile_string += m\n id_string += id_name\n pre_SMILES_list += [smile_string]\n \n #Source identifiers\n identifier_list += [id_string]\n \n pre_inchi_list = []\n for smile in pre_SMILES_list:\n #Generate mol\n m = Chem.MolFromSmiles(smile)\n #SMILES, canonical\n sm = Chem.MolToSmiles(m)\n SMILES_list += [sm]\n #Monoisotopic mass\n mol_weigth = Descriptors.ExactMolWt(m)\n mol_mass_list += [mol_weigth]\n #Mol Forumula\n mol_formula = rdMolDescriptors.CalcMolFormula(m)\n mol_formula_list += [mol_formula]\n # InChI \n inchi = rdinchi.MolToInchi(m)\n pre_inchi_list += [inchi[0]] \n \n \n # InChIKey1 and InChIKey2\n for inchi in pre_inchi_list:\n if not str(inchi).startswith('InCh'):\n inchi = 'NA'\n inchi_list += [inchi]\n \n pre_inchi_key_list =[]\n for inchi2 in inchi_list: \n if inchi2 == 'NA':\n inchi_key = \"NA-NA\"\n pre_inchi_key_list += [inchi_key]\n if inchi2 != 'NA':\n inchi_key = rdinchi.InchiToInchiKey(inchi2)\n pre_inchi_key_list += [inchi_key]\n \n for inchi_key in pre_inchi_key_list:\n inchi_key = inchi_key.split('-')\n inchi_key2 = inchi_key[1]\n inchi_key2_list += [inchi_key2]\n inchi_key1 = inchi_key[0]\n inchi_key1_list += [inchi_key1]\n\n # NA list \n nr_of_structures = len(SMILES_list)\n NA_list += ['NA'] * nr_of_structures\n\n overall_list = [mol_mass_list]+[inchi_list]+[SMILES_list]+\\\n [identifier_list]+[inchi_key2_list]+[inchi_key1_list]+[mol_formula_list]+\\\n [NA_list]+[NA_list]+[NA_list]+[NA_list]\n \n return overall_list" ]
[ "0.6078832", "0.55234087", "0.5375857", "0.53457975", "0.5281195", "0.5259086", "0.5258982", "0.5205008", "0.5194316", "0.5151961", "0.5147367", "0.51438403", "0.51321965", "0.5130582", "0.5127317", "0.51114666", "0.51019084", "0.50857204", "0.50702864", "0.5066564", "0.50647205", "0.50641155", "0.5052011", "0.5051172", "0.5026708", "0.500843", "0.4989082", "0.49824467", "0.4924836", "0.49110034", "0.48989803", "0.48827857", "0.48747268", "0.48741847", "0.48663545", "0.4864598", "0.4860564", "0.4857585", "0.4846701", "0.48319322", "0.4816038", "0.48069978", "0.48061123", "0.47991946", "0.47974738", "0.47884208", "0.4782406", "0.47691974", "0.4767168", "0.47663334", "0.4760977", "0.47579372", "0.47576037", "0.4754929", "0.47446156", "0.47410625", "0.47351748", "0.47311527", "0.47251195", "0.47228453", "0.47224054", "0.47212183", "0.47159064", "0.4703944", "0.47008657", "0.46964085", "0.4693316", "0.46932995", "0.46838823", "0.46810538", "0.4675006", "0.4671278", "0.46698004", "0.4669025", "0.4668586", "0.4667796", "0.4663676", "0.46595082", "0.4655937", "0.46528527", "0.46507022", "0.46475178", "0.46456558", "0.46305367", "0.4626187", "0.4625817", "0.46254775", "0.4618442", "0.4614921", "0.4608265", "0.4603351", "0.46024263", "0.45982286", "0.45949122", "0.45874172", "0.45829841", "0.45808524", "0.457941", "0.4579362", "0.45756465" ]
0.4942241
28
Get path to the PubChem template if it exists.
def _get_pubchem_template_path(self, het_id):
    path = os.path.join(self.pubchem_templates, f"{het_id}.sdf")

    return path if os.path.isfile(path) else ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_template_path(self):\n raise NotImplementedError()", "def template_path(self) -> str:\n return self._values.get(\"template_path\")", "def get_template(self, template):\n\n template_path = aj.config.data['email']['templates'].get(template, 'default')\n\n if template_path == 'default' or not os.path.isfile(template_path):\n template_path = DEFAULT_TEMPLATES[template]\n\n return template_path", "def template_path(self):\n return self.get_config(\"templates\")", "def getTemplateFile(fname):\n return os.path.join(Configurations.getTemplateDir(), fname)", "def _get_template_filename(self):\n _format = self.cfg.get('mutations', 'format')\n if _format == 'pdf':\n tf = 'PDFTemplate.bt'\n elif _format == 'png':\n tf = 'PNG12Template.bt'\n\n module_dir = os.path.dirname(os.path.abspath(__file__))\n\n return os.path.join(module_dir, templates_dir, tf)", "def _find_base_path(self):\n paths = [path for path, content in self._templates]\n if len(paths) == 1:\n return os.path.dirname(paths[0])\n return common_path_prefix(paths)", "def get_template(name):\n found_dir = False\n pkg_dir = get_sitepackage_dirs()\n for pd in pkg_dir:\n if os.path.isdir(pd + '/lmdo'):\n found_dir = '{}/lmdo/local_template/{}'.format(pd, name)\n if os.path.isfile(found_dir):\n break\n else:\n found_dir = False\n \n if not found_dir:\n Oprint.warn('Template file {} is missing'.format(name), 'lmdo')\n\n return found_dir", "def get_template_filename(template):\n config = read_config(SETTINGS_PATH)\n #String templates\n if (template in STRING_TEMPLATES):\n options = config.options(STRING_TEMPLATES_SECTION) \n for option in options:\n if (option==template):\n #Get root path for the templates\n root_path = config.get(TEMPLATES_SECTION,TEMPLATES_ROOT_PATH)\n #Get the strings path templates\n strings_path = config.get(STRING_TEMPLATES_SECTION,STRING_TEMPLATES_PATH)\n return join(root_path,strings_path),config.get(STRING_TEMPLATES_SECTION,option)", "def get_template_from_path(path: str) -> str:\r\n path = path.replace(\"\\\\\", \"/\")\r\n return path", "def template(self):\n return self.conf.get(\"template\", None)", "def get_template_name(self):\n if self.template_name:\n return self.template_name\n\n if Path('_templates/global/WaitPage.html').exists():\n return 'global/WaitPage.html'\n return 'otree/WaitPage.html'", "def getPublishPath(self, filename):\n \n #recognize first\n if not self.isRecognized(filename): return None\n else:\n filename = Template(xpath(self.currentDataset,\n './/_:fileTemplate/text()', self.currentDatasetNs)).substitute(\\\n self.groupDict, hostname=self.hostname, SCIFLO_ROOT=self.scifloRoot)\n publishAtTpls = xpath(self.currentDataset,\n './/_:publishAt/_:location/_:data/text()',\n self.currentDatasetNs)\n if isinstance(publishAtTpls, (types.ListType, types.TupleType)):\n publishTpl = publishAtTpls[0]\n else: publishTpl = publishAtTpls\n publishAt = Template(publishTpl).substitute(self.groupDict,\n hostname=self.hostname, SCIFLO_ROOT=self.scifloRoot)\n return os.path.join(publishAt, filename)", "def template_dir(self):\n return os.path.join(Config().template_dir(), 'platform')", "def save_path(self):\n return self.template.manager.render_template_txt(self.path, self.template)", "def _get_config_template(self, key):\n tmp_path = self._get_config_value('templates', 'path') + key\n return tmp_path", "def determine_template_by_path(path):\n path = path.lstrip('/')\n\n path_chunks = re.split('\\/', path)\n if len(path_chunks) <= 1:\n return path\n else:\n \"\"\"\n For now be ignorant and just return the\n 
first entry of the list as the possible template\n name, so in fact we only have a 1 level deep structure\n \"\"\"\n return '_%s.html' % path_chunks[0]", "def get_template_path(relative_path, **kwargs): # lint-amnesty, pylint: disable=unused-argument\n return relative_path", "def __default_pptx_path(self):\n thisdir = os.path.split(__file__)[0]\n return os.path.join(thisdir, 'templates', 'default.pptx')", "def template_path(name):\n template_dir = os.path.join(os.path.dirname(__file__), 'templates')\n return os.path.join(template_dir, (name + \".html\"))", "def get_template_path(relative_path):\r\n\r\n if not is_request_in_microsite():\r\n return relative_path\r\n\r\n microsite_template_path = str(get_value('template_dir'))\r\n\r\n if microsite_template_path:\r\n search_path = os.path.join(microsite_template_path, relative_path)\r\n\r\n if os.path.isfile(search_path):\r\n path = '{0}/templates/{1}'.format(\r\n get_value('microsite_name'),\r\n relative_path\r\n )\r\n return path\r\n\r\n return relative_path", "def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str:\r\n # automatically select path type depending on running OS\r\n if path_type == PathType.AUTO:\r\n if platform.system() == \"Windows\":\r\n path_type = PathType.WINDOWS\r\n elif platform.system() == \"Linux\":\r\n path_type = PathType.LINUX\r\n else:\r\n raise RuntimeError(\"Unknown platform\")\r\n\r\n path_template = path_template.replace(\"<USERNAME>\", get_user_name())\r\n\r\n # return correctly formatted path\r\n if path_type == PathType.WINDOWS:\r\n return str(pathlib.PureWindowsPath(path_template))\r\n elif path_type == PathType.LINUX:\r\n return str(pathlib.PurePosixPath(path_template))\r\n else:\r\n raise RuntimeError(\"Unknown platform\")", "def template(self):\n template_names = self.get_template_names()\n if template_names:\n return template_names[0]\n return None", "def path(self) -> Optional[str]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[str]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[str]:\n return pulumi.get(self, \"path\")", "def getTmpTemplateFile(fname):\n return os.path.join(Configurations.getTmpTemplateDir(), fname)", "def get_template(self, template):\n\n\n env = Environment(\n loader=FileSystemLoader('templates')\n )\n return env.get_template(template)", "def _get_template(self):\n # Get templates and put them in the order of importance:\n # 1. template specified in \"modules.yaml\"\n # 2. template specified in a package directly\n # 3. default template (must be defined, check in __init__)\n module_system_name = str(self.module.__name__).split(\".\")[-1]\n package_attribute = \"{0}_template\".format(module_system_name)\n choices = [\n self.conf.template,\n getattr(self.spec.package, package_attribute, None),\n self.default_template, # This is always defined at this point\n ]\n # Filter out false-ish values\n choices = list(filter(lambda x: bool(x), choices))\n # ... 
and return the first match\n return choices.pop(0)", "def _get_template(self):\n try:\n template_path = current_app.config.get('REPORT_TEMPLATE_PATH')\n template_code = Path(f'{template_path}/{self._get_template_filename()}').read_text()\n # substitute template parts\n template_code = self._substitute_template_parts(template_code)\n except Exception as err: # noqa: B902; just logging\n current_app.logger.error(err)\n raise err\n return template_code", "def template(self) -> str:\n manifest = self._get_manifest()\n\n return manifest[\"template\"]", "def template_dir(self):\n return self.cm.get(YAML_CONFIG_TEMPLATE_DIR)", "def _git_templates():\n search_locations = [\n '/usr/share/git-core/templates',\n '/usr/local/share/git-core/templates',\n '/usr/local/git/share/git-core/templates'\n ]\n\n for possible_location in search_locations:\n if isdir(possible_location):\n return possible_location\n\n return None", "def getTemplateDir():\n return os.path.join(Configurations.getProjectRootDir(), TEMPLATE_DIR_NAME)", "def get_template_name(self):\n template = None\n if self.template:\n template = self.template\n if not template:\n for p in self.get_ancestors(ascending=True):\n if p.template:\n template = p.template\n break\n if not template:\n template = settings.CMS_TEMPLATES[0][0]\n for t in settings.CMS_TEMPLATES:\n if t[0] == template:\n return t[1] \n return _(\"default\")", "def get_template(self):\n if self.get_website:\n return self.get_website.get_template()\n else:\n return default_entity.get_website.get_template()", "def get_template_name(self):\n if self.template_name:\n return '%s' % self.template_name\n\n if self.template_name_prefix:\n return '%s%s.html' % (self.template_name_prefix, self.mode)\n\n for piece_name in reversed(list(self.pieces.keys())):\n piece = getattr(self, piece_name)\n result = piece.get_template_name()\n if result:\n return '%s.html' % result\n\n return None", "def join_path(self, template, parent):\n if (template.startswith('./')):\n return os.path.join(os.path.dirname(parent), template)\n return template", "def process_template(self, component):\n destination = os.path.join(self.pubchem_templates, f\"{component.id}.sdf\")\n downloaded = download_template(destination, component.id, component.inchikey)\n\n if downloaded:\n rescale_molecule(destination, 1.5)\n\n return downloaded", "def _get_template(specified_template, default_template):\n template_file_path = specified_template\n if template_file_path:\n if not (os.path.exists(template_file_path) and os.path.isfile(template_file_path)):\n LOG.error(u\"Template file: %s doesn't exist, using default template\",\n template_file_path)\n template_file_path = None\n\n if not template_file_path:\n # using default template\n template_file_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n default_template\n )\n\n LOG.debug(u\"template file used: %s\", template_file_path)\n with open(template_file_path, \"r\") as definition:\n return definition.read()", "def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path", "def complete_template(dataset_directory: str, template: str) -> str:\n dataset_directory = os.path.abspath(dataset_directory)\n result = os.path.normpath(os.path.join(dataset_directory, template))\n if Path(dataset_directory) not in Path(result).parents:\n raise ValueError(f\"Template '{template}' points outside of the dataset dataset directory.\")\n return result", "def get_pkginfo_template(pkginfo_template_path):\n pkginfo_template 
= FoundationPlist.readPlist(\n os.path.expanduser(pkginfo_template_path)).get(\"pkginfo\")\n if not pkginfo_template:\n sys.exit(\"Pkginfo template format incorrect!. Quitting.\")\n return pkginfo_template", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "def _FindTemplateFile(self, topdir):\n if topdir.endswith('..'):\n topdir = '/'.join(topdir.split('/')[:-2])\n fnames = os.listdir(topdir)\n for fname in fnames:\n filename = '%s/%s' % (topdir, fname)\n if filename.endswith('.yaml') and not os.path.isdir(filename) and \\\n os.path.exists(filename):\n f = open(filename, 'r')\n magic_code = f.read(22)\n f.close()\n if '#!fmri_file_template' in magic_code:\n return filename\n return None", "def get_template_path_with_theme(relative_path):\n relative_path = os.path.normpath(relative_path)\n\n theme = get_current_theme()\n\n if not theme:\n return relative_path\n\n # strip `/` if present at the start of relative_path\n template_name = re.sub(r'^/+', '', relative_path)\n\n template_path = theme.template_path / template_name\n absolute_path = theme.path / \"templates\" / template_name\n if absolute_path.exists():\n return str(template_path)\n else:\n return relative_path", "def get_path_to(self, content):\n exported = self.getExported()\n content_path = content.getPhysicalPath()\n if is_inside_path(exported.rootPath, content_path):\n return \"/\".join(canonical_tuple_path(\n [exported.root.getId()] + relative_tuple_path(\n exported.rootPath, content_path)))\n return \"root:\" + \"/\".join(canonical_tuple_path(\n relative_tuple_path(exported.basePath, content_path)))", "def get_relative_path(self):\n if self.dip or self.sip or self.replica:\n raise PackageError(\n \"Get relative path for sip or replica packages not yet implemented\"\n )\n if self.deleted:\n raise PackageError(\"There are no relative paths for deleted packages\")\n if self.uuid is None:\n raise PackageError(\"Cannot generate a relative path without a package UUID\")\n rel = \"\"\n left_offset = len(self.default_pair_tree)\n right_offset = -len(self.compressed_ext)\n try:\n if self.current_path.endswith(self.compressed_ext):\n rel = self.current_path[left_offset:right_offset]\n else:\n rel = self.current_path[left_offset:]\n except AttributeError:\n raise PackageError(\"Current path doesn't exist for the package\")\n return \"{}/data/METS.{}.xml\".format(rel, self.uuid)", "def template_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"template_name\")", "def get_template(self):\n return self.template", "def template(self):\n with open(self.compute.submission_template, \"r\") as f:\n return f.read()", "def get_source(self, environment, template):\n if \"/\" not in template:\n path = template.split(\".\")\n\n # The first part of the path must be a bundle name\n # The other parts are the hierarchy of directory after 'views'\n bundle = path[0]\n sub_hierarchy = \"/\".join(path[1:])\n path = \"bundles/\" + bundle + \"/views/\" + sub_hierarchy + \".jj2\"\n else:\n path = template\n\n path = join(self.server.user_directory, path)\n if not exists(path):\n raise TemplateNotFound(template)\n\n mtime = getmtime(path)\n with open(path, 'r', encoding=\"utf-8\") as file:\n source = file.read()\n\n return source, path, lambda: mtime == getmtime(path)", "def join_path(self, template: str, parent: str) -> str:\n return template", "def templates_folder(self):\n return os.path.join(\n 
os.path.dirname(__file__), \"default_config\", \"divvy_templates\"\n )", "def pdf_path() -> Path:\n return Path(__file__).parent / 'data' / 'ocr.pdf'", "def test_templates(self):\n path = str(Template())\n self.assertTrue(os.path.exists(path))", "def get_content_path(content):", "def _get_template_fname(self):\n template_fname = self._context.get('template_fname', False)\n return template_fname", "def destPath(file, package, type='files'):\n\treturn tmpDir(package)+'/etc/univention/templates/'+type+'/'+file", "def path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path\")", "def get_path(self) -> Optional[str]:\n return self.path", "def get_contest_template_file(gt_id, horizon):\n return os.path.join(\"data\", \"fcstrodeo_nctemplates\",\n get_contest_id(gt_id, horizon)+\"_template.nc\")", "def _find_relative(self, spec):\n if spec.template_rel_path is not None:\n return os.path.split(spec.template_rel_path)\n # Otherwise, determine the file name separately.\n\n locator = self.loader._make_locator()\n\n # We do not use the ternary operator for Python 2.4 support.\n if spec.template_name is not None:\n template_name = spec.template_name\n else:\n template_name = locator.make_template_name(spec)\n\n file_name = locator.make_file_name(template_name, spec.template_extension)\n\n return (spec.template_rel_directory, file_name)", "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "def _get_template():\n r = get('http://metadata.google.internal/'\n 'computeMetadata/v1/instance/attributes/instance-template',\n headers={'Metadata-Flavor': 'Google'})\n if r.status_code == 200:\n return sub(r'.+instanceTemplates/(.+)', r'\\1', r.text)\n else:\n return ''", "def getTemplate():\n\n with open('/home/sevudan/Scripts/projects/topogen/template.cfg', 'r') as file:\n data = file.read()\n file.close()\n return Template(data)", "def _pkg_path(self, pkg):\n r = rospkg.RosPack()\n pkg_path = r.get_path(pkg) \n return pkg_path", "def _pkg_path(self, pkg):\n r = rospkg.RosPack()\n pkg_path = r.get_path(pkg) \n return pkg_path", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def get_content_path(section, path=None):\n\n if path and os.path.isdir(path):\n return 'content/' + path\n elif section == 'blog' and os.path.isdir('content/blog'):\n return 'content/blog'\n elif section == 'news' and os.path.isdir('content/news'):\n return 'content/news'\n\n return 'content/' + path", "def sed_template_filename(sedtype):\n path = datapath.sed_template_path()\n filename = 'SEDtemplate_'+sedtype.lower()+'.fits'\n return join(path, filename)", "def source_pod_path(self):\n source_path = self.pod.path_format.format_static(\n self.source_format, locale=self.locale)\n # Fall back to the pod path if using locale and the localized\n # version does not exist.\n if self.use_locale and self.use_fallback and not self.pod.file_exists(source_path):\n source_path = self.pod_path\n return source_path", "def template_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"template_id\")", "def getFile(self):\n #try to redetect the filetype\n vim.command(\"filetype detect\")\n #return the filetype\n filetype = vim.eval(\"&ft\")\n #filetype = vim.command(\"&ft\")\n if filetype:\n for file in self.template_files:\n if filetype.lower() in file.lower():\n self.hasTemplate = True\n return open(self.template_folder + \"/\" + file, 'r')\n return None", "def 
structure_file_path(self):\n return os.path.join(\n self.base_path,\n self.structure_dir,\n self.content_path,\n self.structure_filename\n )", "def current_if_exists(cls):\n ret_val = gxapi_cy.WrapEMAPTEMPLATE._current_if_exists(GXContext._get_tls_geo())\n return GXEMAPTEMPLATE(ret_val)", "def get_notification_template(self):\n if self.db_config_file.key_exists(\"notification_template_file\"):\n filename = self.db_config_file_value(\"notification_template_file\").strip('\"')\n return open(filename, 'rt').read()\n\n return get_data(\"asebackupcli\", \"notification.json\")", "def get_nuspec_at_path(path):\n if NuSpec.path_is_nuspec_file(path):\n return path\n elif os.path.isdir(path):\n return utils.dir_find_file_with_extension(path, '.nuspec')\n return None", "def path(self):\n if self._package:\n return self._package.resourceDir/self._storageName\n else:\n return self._storageName", "def get_template_base_directory(request):\n try:\n print(\"asdf\")\n return TemplateBaseDirectory.for_request(request).name\n except Exception:\n # If we can't get the template base directory from the database,\n # just use the default one.\n return \"plain\"", "def _get_template(self, template_name):\n if template_name not in self.chached_templates:\n self.chached_templates[template_name] = self.env.get_template(template_name)\n return self.chached_templates[template_name]", "def get_resource(filename: str, path: str | None = None) -> str:\n root = Path(__file__).parent\n full_path = root if path is None else root / Path(path)\n return str(full_path / filename)", "def _GetTemplate(self):\n# First read default template.\n tmplt = self._LoadTemplate(c.preproc_template_default)\n tmplt['proc'] = self.topdir\n self.template_type = 'default'\n\n self.templates = []\n if self.template_file is not None:\n tmplt.update(self._LoadTemplate(self.template_file))\n self.template_type = 'command-line'\n self.templates.append(os.path.abspath(self.template_file))\n found_template = True\n else:\n# Find a study specific template file.\n study_template_file = self._FindTemplateFile('%s/..' % self.topdir)\n if study_template_file is not None:\n# Merge study template into default, study template has precedence.\n if self.verbose:\n print \"Using study template at \" + study_template_file\n tmplt.update(self._LoadTemplate(study_template_file))\n self.template_type = 'study-specific'\n self.templates.append(os.path.abspath(study_template_file))\n found_template = True\n else:\n found_template = False\n# Now look for a subject-specific template file.\n subject_template_file = self._FindTemplateFile('%s' % self.topdir)\n if subject_template_file is not None:\n# Merge subject template, subject template has precedence.\n if self.verbose:\n print \"Using subject-specific template at %s\" % \\\n subject_template_file\n tmplt.update(self._LoadTemplate(subject_template_file))\n self.template_type = 'study-specific'\n self.templates.append(os.path.abspath(subject_template_file))\n found_template = True\n\n if not found_template:\n raise RuntimeError('Could not find template file.')\n\n if tmplt.get('subject','same') == 'same':\n# Default subdirectory is same as data directory.\n tmplt['subject'] = self.topdir.split('/')[-1]\n else:\n if not isinstance(tmplt['subject'],str):\n errstr = 'preprocess: Invalid subject number. 
Be sure to ' + \\\n 'enclose the subject number item with double quotes.'\n raise RuntimeError(errstr)\n\n# Keys that apply to all EPIs.\n self.fsl_flip = tmplt.get('fsl_flip', False)\n if self.fsl_flip:\n self.flip_opts = '-LT'\n else:\n self.flip_opts = ''\n\n# Replace strings with python types.\n for key in tmplt.keys():\n if tmplt[key] == 'None':\n tmplt[key] = None\n elif key == 'True':\n tmplt[key] = True\n elif key == 'False':\n tmplt[key] = False\n return tmplt", "def get_templates_dir(self):\n return self.templates_dir", "def generate_filename_template_path(output_dir, filename_template):\n if output_dir:\n os.makedirs(output_dir, exist_ok=True)\n return os.path.join(output_dir, filename_template)\n return None", "def __get_path(self):\n return self.path", "def _document_path(document):\n\n try:\n path = document.fullName.fsName\n except Exception:\n path = None\n\n return path", "def template_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"template_name\")", "def get_packaging_pictures_path(self):\n file_path = os.path.dirname(__file__)\n file_path = os.path.join(file_path, \"Packaging\")\n return file_path", "def launch_template_name(self) -> Optional[str]:\n return pulumi.get(self, \"launch_template_name\")", "def launch_template_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"launch_template_name\")", "def template():\n return ENVIVIRTUALIZABLEURI('DEFile')", "def install_location(self):\r\n return self._content_at_path('/template/os/install/%s' % self.install_type)", "def template(self):\n return self._template", "def template(self):\n return self._template", "def template(self):\n return self._template", "def resource_path(p=()):\n # map a string to a tuple containing the string to provide the obvious shortcut\n if isinstance(p, str):\n p = (p,)\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), *p)", "def get_template(self, name):\n with open(name, 'r+') as open_f:\n template_content = open_f.read()\n return template_content" ]
[ "0.6953855", "0.68982375", "0.68708664", "0.67688173", "0.65419143", "0.64124465", "0.63890076", "0.63717955", "0.6362928", "0.63347596", "0.6195259", "0.61605114", "0.61545146", "0.61206657", "0.6091912", "0.60857964", "0.60805887", "0.6065575", "0.60610527", "0.6056612", "0.60344166", "0.60231346", "0.60086554", "0.5999773", "0.5999773", "0.5999773", "0.5994696", "0.5989366", "0.5982411", "0.5970935", "0.5956089", "0.5945159", "0.58641535", "0.58632034", "0.58451915", "0.5833164", "0.5831121", "0.583047", "0.5820457", "0.5786521", "0.5780929", "0.5776611", "0.5759498", "0.5752421", "0.5752421", "0.5735011", "0.5729063", "0.5721901", "0.5702499", "0.570245", "0.5680211", "0.5676361", "0.5672924", "0.5669913", "0.5652654", "0.5646262", "0.5646203", "0.56447303", "0.564212", "0.5632996", "0.5616154", "0.561337", "0.56083363", "0.56036806", "0.5592773", "0.55838555", "0.5579073", "0.5572726", "0.5572726", "0.5571227", "0.5571227", "0.55544263", "0.55542976", "0.5545891", "0.55451804", "0.55346316", "0.5532179", "0.55139554", "0.5513511", "0.5512961", "0.5508509", "0.55052775", "0.5502323", "0.55006117", "0.5495378", "0.54627585", "0.5458405", "0.5458156", "0.54560715", "0.5454866", "0.5441422", "0.5435423", "0.5406872", "0.5403522", "0.53989357", "0.5393784", "0.5393784", "0.5393784", "0.53865343", "0.53842306" ]
0.7834179
0
Loads a template molecule with 2D coordinates
def _load_template(self, path):
    mol = Chem.RWMol()
    extension = os.path.basename(path).split(".")[1]

    if extension == "sdf":
        mol = Chem.MolFromMolFile(path, sanitize=True, removeHs=True)
    elif extension == "pdb":
        mol = Chem.MolFromPDBFile(path, sanitize=True, removeHs=True)
    else:
        raise ValueError("Unsupported molecule type '{}'".format(extension))

    p = Chem.AdjustQueryParameters()
    p.makeAtomsGeneric = True
    p.makeBondsGeneric = True
    mol = Chem.AdjustQueryProperties(mol, p)

    return mol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_xyz(filename):\n periodic = load_periodic()\n #read molecule\n with open(filename) as f:\n size = int(next(f))\n title = next(f).strip()\n molecule = Molecule(title,size)\n for _ in range(size):\n row = next(f).split()\n tag = row[0]\n element = periodic[tag]\n coordinate = []\n for j in range(3):\n coordinate.append(float(row[j+1]))\n atom = Atom(element,coordinate)\n\n molecule.append(atom)\n f.close()\n \n return molecule", "def __load_topography__(filepath):\n\tfrom clawpack.geoclaw import topotools\n\ttopo = topotools.Topography(filepath)\n\t\n\tif TESTING:\n\t\timport matplotlib.pyplot as plt\n\t\ttopo.plot()\n\t\tplt.show()\n\ttopo.topo_type = 3\n\txgrid = topo.X\n\tygrid = topo.Y\n\tzgrid = topo.Z\n\t\n\t#temp; find a better solution (e.g. convert from lat/lon to actual space)\n\t#xgrid = 1.e4 * xgrid\n\t#ygrid = 1.e4 * ygrid\n\t\n\t#test only\n\tshape = zgrid.shape\n\tny, nx = shape[0], shape[1]\n\t#for iy in range(0,ny):\n\t\t#zgrid[iy, 0] = zgrid[iy,0]+1e4\n\t#for ix in range(0,nx):\n\t\t#zgrid[1, ix] = zgrid[1,ix]-1e4\n\t\n\tdef wavy(x, y):\n\t\treturn np.sin(0.2*np.pi*x)*np.cos(0.4*np.pi*y)\n\t\n\twavyz = wavy(xgrid, ygrid)\n\t\n\t\n\tfor ix in range(0,0):\n\t\tfor iy in range(0,0):\n\t\t\tzgrid[iy, ix] = 1e4*wavyz[iy, ix]\n\t\n\tzgrid = 1e-4 * zgrid\n\t\n\treturn (xgrid, ygrid, zgrid)", "def __init__(\n self,\n pubchem_templates_path: str = \"\",\n general_templates_path: str = config.general_templates,\n ) -> None:\n self.coordgen_params = rdCoordGen.CoordGenParams()\n self.coordgen_params.coordgenScaling = 50 / 1.5\n self.coordgen_params.templateFileDir = config.coordgen_templates\n\n self.pubchem_templates = (\n pubchem_templates_path if os.path.isdir(pubchem_templates_path) else \"\"\n )\n self.templates: Dict[str, rdkit.Chem.rdchem.Mol] = OrderedDict()\n\n if os.path.isdir(general_templates_path):\n for k in sorted(os.listdir(general_templates_path)):\n template = self._load_template(os.path.join(general_templates_path, k))\n template_name = k.split(\".\")[0]\n self.templates[template_name] = template", "def fixture_coord():\n\tEXAMPLE_FILE_FOLDER = str(MODULE_DIR) + \"/data/nail1/\"\n\tcoord_x, coord_y, coord = read.load_coord(EXAMPLE_FILE_FOLDER)\n\treturn coord", "def load_coordinates(chip):\n for z in range(8):\n # Initiate a pre-filled grid\n chip.coordinates.append([[0 for x in range(chip.width)] for y in range(chip.height)])\n\n # Iterate through the grid\n for y in range(chip.height):\n for x in range(chip.width):\n coordinate = crd.Coordinate(x, y, z)\n\n # Add connections\n if z > 0:\n coordinate.connections[x, y, z - 1] = wr.Wire(x, y, z - 1)\n chip.coordinates[z-1][y][x].connections[x, y, z] = wr.Wire(x, y, z)\n\n if y >= 0 and y < chip.height:\n coordinate.connections[x, y + 1, z] = wr.Wire(x, y + 1, z)\n\n if y > 0 and y <= chip.height:\n coordinate.connections[x, y - 1, z] = wr.Wire(x, y - 1, z)\n\n if x >= 0 and x < chip.width:\n coordinate.connections[x + 1, y, z] = wr.Wire(x + 1, y, z)\n\n if x > 0 and x <= chip.width:\n coordinate.connections[x - 1, y, z] = wr.Wire(x - 1, y, z)\n\n # Replace coordinate with its respective class\n chip.coordinates[z][y][x] = coordinate\n\n return chip", "def rd_xyz(self):\n nmol = self.__rd_xyz_nmol()\n fpin = open(self.config['xyzfile'], \"r\")\n tmol = self.template['molspec']['atoms']\n ntatom = self.template['molspec']['n_atoms']\n mol = []\n for i in range(nmol):\n # number of atom,\n line = fpin.readline().strip()\n natom = int(line)\n line = fpin.readline()\n\n jobname = \"%s\" % line[:-1]\n atom = 
[]\n\n if ntatom != natom:\n print \"geometry data in template file is not consistant with xyz file. check the template.\"\n for j in range(natom):\n line = fpin.readline()\n rec = line.split()\n if len(rec) == 5:\n atomname, x, y, z, imove = rec\n elif len(rec) == 4:\n atomname, x, y, z = rec\n else:\n print \"nothing to do...\"\n exit(1)\n frg = tmol[j]['frg']\n record = {'name': atomname, 'coord': [float(x),float(y),float(z)], 'frg':frg}\n atom.append(record)\n onemol = {'natom': natom, 'jobname': jobname, 'info': '', 'atom':atom}\n mol.append(onemol)\n self.model['mol'] = mol\n fpin.close()\n return", "def loadTiles():\n with open('resources/map.txt', 'r') as f:\n rows = f.readlines()\n global numCols\n numCols = len(rows[0].split('\\t')) # Assumes all rows contain the same number of tabs\n global numRows\n numRows = len(rows)\n for y in range(numRows):\n cols = rows[y].split('\\t')\n for x in range(numCols):\n tileName = cols[x].replace('\\n', '')\n if tileName == \"StartingRoom\":\n global currentPosition\n currentPosition = [x, y]\n _world[(x, y)] = None if tileName == '' else getattr(__import__('tiles'), tileName) (x, y)", "def load_data(self, map_name, grid_name, tp_name):\n \n self.map= TiledMap(path.join(self.map_folder, map_name))\n self.map_img = self.map.make_map()\n self.map_img2 = self.map_img\n #self.noisy_map_img = noisy(\"gauss\", pg.surfarray.array3d(self.map_img))\n self.noisy_map_img = make_noisy(pg.surfarray.array3d(self.map_img))\n self.map_rect = self.map_img.get_rect()\n \n with open(path.join(self.map_folder, tp_name), 'rt') as f:\n # destinations is a dict mapping each tilemap teleport coordinate to\n # the destination tilemap coordinate\n self.destinations = eval(f.read())\n\n self.grid= OccupancyGrid(self, path.join(self.map_folder, grid_name)) #down here because it needs destinations\n self.graph = self.grid.make_graph()\n\n #sounds\n self.wall_channel=pg.mixer.Channel(0)\n self.wall_sound=pg.mixer.Sound(WALL_THUD_SOUND)\n self.teleport_channel=pg.mixer.Channel(1)\n self.teleport_sound=pg.mixer.Sound(TELEPORT_SOUND)", "def init_molecule():\n \n pos1 = np.array([[0.2],[0.2]])\n pos2 = np.array([[0.8],[0.8]])\n mol = Molecule(pos1, pos2, 10, 20, 1, 0.5)\n \n return mol", "def loadNodes(self, fname):\r\n with open(fname, \"r\") as fp:\r\n\r\n # Read in the header\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"SPECGRID\":\r\n self.SPECGRID = np.array(fp.readline().split()[0:3], dtype=int)\r\n if item[0] == \"COORDSYS\":\r\n self.COORDSYS = fp.readline().split()\r\n if item[0] == \"COORD\":\r\n break\r\n\r\n # Read in the coordinates\r\n self.coords = []\r\n for line in fp:\r\n if line.split()[-1] != \"/\":\r\n item = line.split()\r\n for c in item:\r\n if '*' in c:\r\n cc = c.split('*')\r\n for i in range(int(cc[0])):\r\n self.coords.append(cc[-1])\r\n else:\r\n self.coords.append(c)\r\n else:\r\n if len(line.split()) > 1:\r\n item = line.split()\r\n for i in range(len(item) - 1):\r\n cc = item[i]\r\n if '*' in cc:\r\n ccc = cc.split('*')\r\n for j in range(int(ccc[0])):\r\n self.coords.append(ccc[-1])\r\n else:\r\n self.coords.append(c)\r\n break\r\n else:\r\n break\r\n\r\n # Read in ZCORN\r\n self.zcorn = []\r\n i = 0\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"ZCORN\":\r\n for line in fp:\r\n if line.split():\r\n if line.split()[-1] != \"/\":\r\n self.zcorn += line.split()\r\n else:\r\n self.zcorn += line.split()[0:-1]\r\n break\r\n if len(self.zcorn) > 0:\r\n break\r\n\r\n # Read 
in (in)active cells\r\n self.active = []\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"ACTNUM\":\r\n for line in fp:\r\n if line.split():\r\n if line.split()[-1] != \"/\":\r\n c = line.split()\r\n if '*' in c:\r\n cc = c.split('*')\r\n for i in range(float(cc[0])):\r\n self.active += cc[-1]\r\n else:\r\n self.active += c\r\n else:\r\n self.active += line.split()[0:-1]\r\n break\r\n\r\n self.coords = np.array(self.coords, dtype=float)\r\n print(self.coords)\r\n\r\n # In Petrel...\r\n self.ne = self.SPECGRID[0] # x i\r\n self.nn = self.SPECGRID[1] # y j\r\n self.nz = self.SPECGRID[2] # z k\r\n\r\n # build grid\r\n self.buildGrid(plot=False)\r\n self.buildActiveCells(plot=False)\r\n self.buildZGrid(plot=False)\r\n # self.calculateVolumes(plot=False)\r\n #\r\n # Convert to VTK\r\n self.GridType = \"vtkStructuredGrid\"\r\n self.Grid = vtk.vtkStructuredGrid()\r\n self.Grid.SetDimensions(self.ne+1, self.nn+1, self.nz+1)\r\n vtk_points = vtk.vtkPoints()\r\n ve = 1.\r\n\r\n for iz in range(self.nz):\r\n if iz == 0:\r\n for iy in range(self.nn+1):\r\n for ix in range(self.ne+1):\r\n vtk_points.InsertNextPoint( self.X0[ix,iy], \\\r\n self.Y0[ix,iy], \\\r\n ve * self.ZZT[iz][ix,iy] )\r\n for iy in range(self.nn+1):\r\n for ix in range(self.ne+1):\r\n vtk_points.InsertNextPoint( self.X0[ix,iy], \\\r\n self.Y0[ix,iy], \\\r\n ve * self.ZZB[iz][ix,iy] )\r\n self.Grid.SetPoints(vtk_points)\r\n\r\n # Add in active cells\r\n ac = vtk.vtkIntArray()\r\n ac.SetName( \"ActiveCells\" )\r\n for iac in self.ActiveCells.flatten( order='F' ):\r\n ac.InsertNextTuple1( iac )\r\n self.Grid.GetCellData().AddArray(ac)", "def healpix_template(ncomp, nside, coordinate=None):\n\n temp = so_map()\n\n if ncomp == 3:\n temp.data = np.zeros((3, 12 * nside ** 2))\n else:\n temp.data = np.zeros((12 * nside ** 2))\n\n temp.pixel = \"HEALPIX\"\n temp.ncomp = ncomp\n temp.nside = nside\n temp.geometry = \"healpix geometry\"\n temp.coordinate = coordinate\n return temp", "def __rd_tpl_molspec(self, fp):\n atoms = []\n # at first spin/charge\n line = fp.readline()\n spin_charge = line[:-1]\n # cart. 
coord.\n while True:\n line = fp.readline()\n if line.strip() == \"\":\n break\n record = self.__split_line_in_molspec(line)\n atoms.append(record)\n n_atoms= len(atoms)\n\n molspec = {'spin_charge': spin_charge, 'n_atoms': n_atoms,\n 'atoms': atoms}\n\n self.template['molspec'] = molspec\n return", "def mrtrix_mesh2vox(surface_path, template_path, temp_dir, output_prefix):\n # Adapt affine translation using metadata\n template = nib.load(template_path)\n _, _, meta = read_geometry(surface_path, read_metadata=True)\n\n template = nib.as_closest_canonical(template)\n affine = template.affine.copy()\n affine[:-1, -1] = template.affine[:-1, -1] - meta['cras']\n\n new_template = nib.Nifti1Image(template.dataobj, affine)\n new_template_path = temp_dir / 'template.mgz'\n nib.save(new_template, new_template_path)\n\n # Reconstruct volume from mesh\n subprocess.run(['mesh2voxel', surface_path, new_template_path, temp_dir / f'{output_prefix}_output.mgz'])\n\n # Save the reconstructed volume with the right affine\n output = nib.load(temp_dir / f'{output_prefix}_output.mgz')\n new_output = nib.Nifti1Image(output.dataobj, template.affine)\n # nib.save(new_output, output_path)\n\n return new_output", "def load_template(\n dataset: DatasetManager, template_dir: str, template_name: str\n) -> NexusTemplate:\n if template_name == \"linear\":\n return LinearNexusTemplate()\n\n fullpath = os.path.join(template_dir, template_name)\n with open(fullpath + \".json\", \"r\") as fdata:\n data = json.load(fdata)\n\n level_doors = []\n other_doors = []\n for eid, door_data in data[\"doors\"].items():\n if door_data[\"level\"] in dataset.levels:\n level_doors.append(eid)\n else:\n other_doors.append(eid)\n\n return NexusTemplate(fullpath, template_name, data, level_doors, other_doors)", "def load_interp_2d():\n f = open('2d_interp_h_vs_dB.pkl', 'rb')\n interp_dict = pickle.load(f)\n f.close()\n return interp_dict['a_grid_interp'],\\\n interp_dict['b_grid_interp']", "def test_get_topology_template(self):\n pass", "def fill_template(map_filepath, resolution, origin): # NOTE: Copied from generate_map_yaml.py\n template = \"\"\"image: MAP_FILEPATH\nresolution: RESOLUTION\norigin: [ORIGIN_X, ORIGIN_Y, YAW]\nnegate: 0\noccupied_thresh: 0.65\nfree_thresh: 0.196\n\"\"\"\n template = template.replace('MAP_FILEPATH', map_filepath)\n template = template.replace('RESOLUTION', str(resolution))\n template = template.replace('ORIGIN_X', str(origin[0]))\n template = template.replace('ORIGIN_Y', str(origin[1]))\n template = template.replace('YAW', str(origin[2]))\n return template", "def _create_from_template(self):\n template_file = self._helper._get_template_file_path()\n self._engine.open_file_by_path(template_file)\n self._save_current_as_new()", "def read_from_grid(filename):\n\n x=[]\n y=[]\n z=[]\n\n fid=open(filename,'r')\n\n for point in fid:\n x.append(float(point.split()[0]))\n y.append(float(point.split()[1]))\n z.append(float(point.split()[2]))\n\n fid.close()\n\n return x, y, z", "def __init__(self,\r\n ox=0.0, oy=0.0, oz=0.0,\r\n dx=1.0, dy=1.0, dz=1.0,\r\n nx=200, ny=200, nz=10,\r\n gtype='points', gname='image',\r\n periodicity=False):\r\n\r\n self.ox = ox\r\n self.oy = oy\r\n self.oz = oz\r\n self.dx = dx\r\n self.dy = dy\r\n self.dz = dz\r\n self.nx = nx\r\n self.ny = ny\r\n self.nz = nz\r\n self.gtype = gtype\r\n self.gname = gname\r\n self.periodicity = periodicity\r\n\r\n if self.gtype == 'points':\r\n self.points = self.nx*self.ny*self.nz\r\n elif self.gtype == 'cells':\r\n self.cells = (self.nx-1 if self.nx> 1 
else 1)*(self.ny-1 if self.ny> 1 else 1)*(self.nz-1 if self.nz> 1 else 1)\r\n\r\n # Compute the size of the grid\r\n self._lx = None #self.dx * self.nx - self.ox\r\n self._ly = None #self.dy * self.ny - self.oy\r\n self._lz = None #self.dz * self.nz - self.oz\r", "def geo2cell(geofile, posfile):", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def test_starting_template(checker):\n contents = labeled.contents(label=\"template\")\n _ = tomllib.loads(contents)", "def load_frame(self):\n world_map = self.data[self.time_point][\"tiles\"]\n self.tiles = []\n for x in range(self.width):\n for y in range(self.height):\n index = x + self.width * y\n tile = world_map[index]\n xpos = x * tile_size\n ypos = y * tile_size\n if tile[\"type\"] == \"Wall\":\n sprite = pyglet.sprite.Sprite(images[\"Wall\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"SnakeHead\":\n sprite = pyglet.sprite.Sprite(images[\"SnakeHead\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"SnakeBody\":\n sprite = pyglet.sprite.Sprite(images[\"SnakeBody\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"Doodah\":\n sprite = pyglet.sprite.Sprite(images[\"Doodah\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"Blank\":\n sprite = pyglet.sprite.Sprite(images[\"Blank\"], x=xpos, y=ypos)\n self.tiles.append(sprite)", "def to_nii2(self, filepath=None, template='gray', vox_size=None, sample_rate=None):\n from .nifti import Nifti2\n\n if vox_size:\n v_size = vox_size\n else:\n v_size = _vox_size(self.locs)\n\n if np.isscalar(self.minimum_voxel_size):\n mnv = np.multiply(self.minimum_voxel_size, np.ones_like(v_size))\n else:\n mnv = self.minimum_voxel_size\n\n if np.isscalar(self.maximum_voxel_size):\n mxv = np.multiply(self.maximum_voxel_size, np.ones_like(v_size))\n else:\n mxv = self.maximum_voxel_size\n\n if 
np.any(v_size < self.minimum_voxel_size):\n v_size[v_size < self.minimum_voxel_size] = mnv[v_size < self.minimum_voxel_size]\n\n if np.any(v_size > self.maximum_voxel_size):\n v_size[v_size > self.maximum_voxel_size] = mxv[v_size > self.maximum_voxel_size]\n\n if template is None:\n img = _gray(v_size)\n\n elif type(template) is nib.nifti1.Nifti1Image:\n img = template\n\n elif isinstance(template, str) or isinstance(template, basestring):\n\n if os.path.exists(template):\n img = nib.load(template)\n\n elif template is 'gray':\n img = _gray(v_size)\n\n elif template is 'std':\n img = _std(v_size)\n\n else:\n warnings.warn('template format not supported')\n else:\n warnings.warn('Nifti format not supported')\n\n if sample_rate:\n data, sessions, sample_rate = _resample(self, sample_rate)\n self.data = data\n self.sessions = sessions\n self.sample_rate = sample_rate\n\n\n hdr = img.get_header()\n temp_v_size = hdr.get_zooms()[0:3]\n\n if not np.array_equiv(temp_v_size, v_size):\n warnings.warn('Voxel sizes of reconstruction and template do not match. '\n 'Voxel sizes calculated from model locations.')\n\n nifti = _brain_to_nifti2(self, img)\n\n if filepath:\n nifti.to_filename(filepath)\n\n return nifti", "def test_create_from_file(self):\n # TODO: Expand test to both openeye and rdkit toolkits\n filename = get_data_file_path(\"molecules/toluene.mol2\")\n\n molecule1 = Molecule(filename, allow_undefined_stereo=True)\n with open(filename, \"r\") as infile:\n molecule2 = Molecule(\n infile, file_format=\"MOL2\", allow_undefined_stereo=True\n )\n assert molecule1 == molecule2\n\n import gzip\n\n with gzip.GzipFile(filename + \".gz\", \"r\") as infile:\n molecule3 = Molecule(\n infile, file_format=\"MOL2\", allow_undefined_stereo=True\n )\n assert molecule3 == molecule1\n\n # Ensure that attempting to initialize a single Molecule from a file\n # containing multiple molecules raises a ValueError\n with pytest.raises(ValueError) as exc_info:\n filename = get_data_file_path(\"molecules/zinc-subset-tripos.mol2.gz\")\n molecule = Molecule(filename, allow_undefined_stereo=True)", "def createByImage(path):\n try:\n mapdata = pygame.image.load(path)\n except:\n m = PositionMap()\n m.setWidth(1)\n m.setHeight(1)\n return m\n return createBySurface(mapdata)", "def load_grd(filename):\n with open(filename, 'r') as f:\n meta = {}\n meta['header'] = []\n meta['header'].append(f.readline().rstrip('\\n'))\n while meta['header'][-1] != '++++':\n meta['header'].append(f.readline().rstrip('\\n'))\n # These determine the type of grid and the field format.\n meta['KTYPE'] = int(f.readline().split()[0])\n if meta['KTYPE'] != 1:\n raise ValueError(\"Not implemented.\")\n meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID'] = [int(s) for s in f.readline().split()]\n # The grid center in units of the x and y grid spacing.\n meta['IX'], meta['IY'] = [int(s) for s in f.readline().split()]\n # These are the x and y grid limits: S is lower, and E is upper.\n meta['XS'], meta['YS'], meta['XE'], meta['YE'] = [float(s) for s in f.readline().split()]\n # These are the numbers of grid points in x and y.\n meta['NX'], meta['NY'], meta['KLIMIT'] = [int(s) for s in f.readline().split()]\n # Implement this to read elliptically truncated grids.\n if meta['KLIMIT'] != 0:\n raise ValueError(\"Not implemented.\")\n # Load the field data. 
This returns an array with shape (NX * NY, 2 * NCOMP).\n conv = dict([(column, string_to_float) for column in range(2 * meta['NCOMP'])])\n data = np.loadtxt(f, dtype=float, converters=conv)\n # Determine the grid spacing and center values.\n meta['DX'] = (meta['XE'] - meta['XS']) / (meta['NX'] - 1)\n meta['DY'] = (meta['YE'] - meta['YS']) / (meta['NY'] - 1)\n meta['XCEN'] = meta['DX'] * meta['IX']\n meta['YCEN'] = meta['DY'] * meta['IY']\n # Reshape the data.\n map = np.empty((meta['NX'], meta['NY'], meta['NCOMP']),\n dtype=np.complex)\n for component in range(meta['NCOMP']):\n column = data[:, 2 * component] + 1j * data[:, 2 * component + 1]\n map[:, :, component] = column.reshape(meta['NX'], meta['NY'], order='F')\n return meta, map", "def load_from_poscar(self, filename):\n with open( filename, 'r' ) as F:\n F = open( filename, 'r' )\n self.fileID = filename\n self.name = F.readline()\n scale = float(F.readline())\n self.unit_cell = mat( fromfile( F, dtype('float'), 9, ' ' ).reshape((3,3)) )\n \n # Scale < 0 means that it gives the volume we want to have\n if scale < 0.0:\n scale=(-scale/self.volume)**(1./3.)\n self.unit_cell *= scale\n\n # If the next line does not contain just numbers, then it is treated as a list of species\n line = F.readline()\n self.species = None\n try:\n self.num_per_type = [int(n) for n in line.split()]\n except:\n species = line.split()\n line = F.readline()\n self.num_per_type = [int(n) for n in line.split()]\n self.species = []\n for n in self.num_per_type:\n self.species.extend(n*[species[0]])\n species = species[1:]\n \n self.num_atoms = 0\n self.num_atoms=sum(self.num_per_type)\n \n mode = F.readline()\n \n self.atoms = mat(fromfile( F, dtype('float'), self.num_atoms*3, ' ' ).reshape(( self.num_atoms,3)))\n \n if re.search('^[cCkK]',mode):\n pass\n else:\n self.atoms = self.atoms*self.unit_cell\n \n if self.name.split()[0] == \"SUPERCELL\":\n self.is_supercell = True\n self.supercell_repetitions = self.name.split()[1].split('x')\n self.supercell_repetitions = [int(i) for i in self.supercell_repetitions]", "def __init__(self, image_x, image_y, sublattice_generate_image=True):\n self.data_extent = (image_x, image_y)\n self._image_noise = False\n self._sublattice_generate_image = sublattice_generate_image\n self.__sublattice = Sublattice([], np.zeros((2, 2)))\n self.__sublattice.atom_list = []", "def test_load_system(self, name, file_name):\n json_file = os.path.join(self.geometry_dir, file_name)\n system = MultiBodySystem.from_json(json_file)", "def car_template(ncomp, ra0, ra1, dec0, dec1, res):\n\n if ncomp == 3:\n pre = (3,)\n else:\n pre = ()\n\n box = get_box(ra0, ra1, dec0, dec1)\n res = res * np.pi / (180 * 60)\n temp = so_map()\n shape, wcs = enmap.geometry(box, res=res, pre=pre)\n temp.data = enmap.zeros(shape, wcs=wcs, dtype=None)\n temp.pixel = \"CAR\"\n temp.nside = None\n temp.ncomp = ncomp\n temp.geometry = temp.data.geometry[1:]\n temp.coordinate = \"equ\"\n return temp", "def __init__(self, path, template):\n super(GenerateSpectrum, self).__init__(path)\n self._template = template", "def load_d(prefix):\n vel_x = np.genfromtxt(file('%s_x.csv' % prefix), delimiter=',')\n vel_y = np.genfromtxt(file('%s_y.csv' % prefix), delimiter=',')\n\n # make a 3d height x width x 2 matrix to hold the vectors\n vel = np.zeros(list(vel_x.shape) + [2])\n vel[:, :, 0] = vel_y # note, this y here is correct--and it's important it be this order\n vel[:, :, 1] = vel_x\n return vel", "def from_crystfel_geom(cls, filename):\n geom_dict = 
load_crystfel_geometry(filename)\n modules = []\n for p in range(cls.n_modules):\n tiles = []\n modules.append(tiles)\n for a in range(cls.n_tiles_per_module):\n d = geom_dict['panels']['p{}a{}'.format(p, a)]\n tiles.append(GeometryFragment.from_panel_dict(d))\n return cls(modules, filename=filename)", "def read_grid(filename_grid, dim=2, slc=None):\n ## get shape and slice\n fid = h5py.File(filename_grid, 'r')\n if dim==2:\n varnames = ['x', 'y', 'ep']\n if slc is None: slc = np.s_[0,:,:]\n if dim==3:\n varnames = ['x', 'y', 'z', 'ep']\n if slc is None: slc = np.s_[:,:,:]\n\n dset = fid.get(varnames[0])\n shape = dset[slc].shape\n (nx,ny,nz) = dset.shape\n ## read variables\n grid = {}\n for varname in varnames:\n try:\n dset = fid.get(varname)\n grid[varname] = np.zeros(shape)\n dset.read_direct(grid[varname], source_sel=slc)\n grid[varname] = grid[varname].transpose()\n except:\n pass\n fid.close()\n return grid, nx, ny, nz", "def make(self, width=1500.0, height=1000.0):\n return self._meta.template1(width, height)", "def _load_chan(chan):\n # Load the coordinates template :\n path = sys.modules[__name__].__file__.split('objects')[0]\n path = os.path.join(path, 'topo')\n file = np.load(os.path.join(path, 'eegref.npz'))\n nameRef, xyzRef = file['chan'], file['xyz']\n keeponly = np.ones((len(chan)), dtype=bool)\n # nameRef = list(nameRef)\n # Find and load xyz coordinates :\n xyz = np.zeros((len(chan), 3), dtype=np.float32)\n for num, k in enumerate(chan):\n # Find if the channel is present :\n idx = np.where(nameRef == k)[0]\n if idx.size:\n xyz[num, 0:2] = np.array(xyzRef[idx[0], :])\n else:\n keeponly[num] = False\n\n return np.array(xyz), keeponly", "def d3_world_map():\n\n return render_template(\"world.html\")", "def read_2d(fpath):\n #check_isfile(fpath)\n objs = []\n ids = []\n with open(fpath, 'r') as f:\n for ff in f:\n id_position, players_x, players_y = token_position(ff)\n objs.append((players_x, players_y))\n ids.append(id_position)\n objs = np.array(objs)\n ids = np.array(ids)\n return ids, objs", "def _load_a_couple0(self, path):\n assert(self._initialisation)\n X = pd.read_hdf(path[0], key='s')\n Y = np.load(path[1])\n return X , Y", "def __init__(self, filename, topology_file=None, grid_type=1,\n extrapolate=False, time_offset=0,\n **kwargs):\n self._grid_type = grid_type\n\n self.filename = filename\n self.topology_file = topology_file\n\n if self._grid_type == 1:\n self.grid = CyTimeGridWindRect(filename)\n elif self._grid_type == 2:\n self.grid = CyTimeGridWindCurv(filename, topology_file)\n else:\n raise Exception('grid_type not implemented ')\n\n self.grid.load_data(filename, topology_file)\n\n super(Grid, self).__init__(**kwargs)", "def generate_surface_temp(self):\n tcl_name = output_folder + \"/surface_output_\" + str(self.input_pdb_path).split(\"/\")[-1][0:-4] + \"_\" + str(self.current_chain) + \".tcl\"\n opened_file = open(tcl_name, \"w\")\n writeable_string = surface(self.input_pdb_path).surface_template(chain = str(self.current_chain))\n opened_file.write(writeable_string)", "def load_tile(path, tile_size):\n img = pyglet.resource.image(path)\n img.width = tile_size\n img.height = tile_size\n return img", "def fixture_grid():\n return load_earth_relief(registration=\"pixel\")", "def read_ice_grid(path):\n grid = xr.open_rasterio(path).squeeze()\n # Deproject coords\n proj = Proj('+proj=merc +lon_0=0 +k=1 +x_0=0 ' +\n '+y_0=-24 +datum=WGS84 +units=m +no_defs')\n lon, lat = proj(grid.coords['x'].values, grid.coords['y'].values,\n inverse=True)\n grid = 
grid.assign_coords(x=lon, y=lat)\n return grid", "def __init__ (self, prototype_filename = None, originX = None, originY = None, cell_Width = None,\n cell_Height = None, ncols = None, nrows = None):\n self.prototype_filename = prototype_filename\n if self.prototype_filename is None:\n self.originX = originX\n self.originY = originY\n self.cell_Width = cell_Width\n self.cell_Height = cell_Height\n self.ncols = ncols\n self.nrows = nrows\n read_existing_raster = False\n else: \n read_existing_raster = True\n \n if read_existing_raster:\n try:\n self.ras = self.read_raster (self.prototype_filename) # read the raster\n raster = gdal.Open (self.prototype_filename)\n geotransform = raster.GetGeoTransform()\n self.originX = geotransform[0]\n self.originY = geotransform[3]\n self.cell_Width = geotransform[1]\n self.cell_Height = geotransform[5]\n self.ncols = self.ras.shape[1] # set nrows and cols as local variables for convenience\n self.nrows = self.ras.shape[0]\n except:\n print ('ERROR: raster read error')\n else:\n try:\n self.ras = np.zeros ((self.nrows, self.ncols)) * np.nan\n except:\n print ('ERROR: raster creation error')\n \n # calculate the indices\n self.x_index = np.zeros (self.ncols) * np.nan\n for i in range(0, self.ncols):\n self.x_index[i] = (i * self.cell_Width) + self.originX + (self.cell_Width / 2.0)\n\n self.y_index = np.zeros (self.nrows) * np.nan\n for j in range(0, self.nrows):\n # reverse index for rows, so lookups can be more natural\n ind = self.nrows - j - 1\n self.y_index[ind] = (j * self.cell_Height) + self.originY + (self.cell_Height / 2.0)\n\n return", "def test_2d_object(gridsize=50):\n\n obj = object25d() \n\n #you can load a 3D object, Z axis gets ignored\n obj.load('objects/sphere.obj')\n\n #obj.prim_square()\n #obj.prim_triangle()\n\n #obj.save('2d_square.obj')\n\n #rotate 45 degrees \n m22 = matrix22()\n m22.from_euler(45)\n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n\n #rotate the points by matrix multiplication \n obj.points = m22.batch_mult_pts( obj.points ) \n\n #saving a 2d object from 3D flattens it on Z axis. 
\n #utterly mangles the topology \n obj.save('2d_rotated.obj')\n\n #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=200, pfb=fb)\n bloody_simple_2drender('2d_rotation.png', obj=[obj], gridsize=200, pfb=fb)\n\n fb.save('2d_rotation.png')", "def get_tile(tilefile,level,x,y):\n\t\n\ttf=file(tilefile,\"r\")\n\t\n\ttd=pickle.load(tf)\n\ta=td[(level,x,y)]\n\t\n\ttf.seek(a[0],1)\n\tret=tf.read(a[1])\n\t\n\ttf.close()\n\treturn ret", "def _add_template(self):\n template_dir = os.path.join(self.label_path, 'standard',\n 'MNI152_T1_2mm_brain.nii.gz')\n template_name = QFileDialog.getOpenFileName(\n self,\n 'Open standard file',\n template_dir,\n 'Nifti files (*.nii.gz *.nii)')\n if not template_name.isEmpty():\n template_path = str(template_name)\n self._add_img(template_path)", "def load_file(self, mapfile):\n tmx = pytmx.TiledMap(mapfile)\n self.width = tmx.width\n self.height = tmx.height\n self.load_images(tmx)\n self.load_floor(tmx)\n self.load_objects(tmx)\n self.load_pois(tmx)", "def read_grid2d(grid_file):\n labels = []\n with grid_file.open('r') as f:\n for row in f.readlines():\n labels.append([x.strip() for x in row.split('\\t')])\n\n labels = array(labels)\n grid2d = make_grid(labels.shape[0], labels.shape[1])\n grid2d['label'] = labels\n return grid2d", "def from_template(self, matrix_template):\n self.set_format(matrix_template['format'])\n\n if matrix_template['limit']:\n self.set_limit(matrix_template['limit'])\n self.get_size().rows = int(matrix_template['height'])\n self.get_size().cols = int(matrix_template['width'])\n\n # zera a situação da Matrix\n self.__strategy.init(self)\n self.__strategy.from_template(self, matrix_template['data'])", "def __init__(self, size):\n self.world = [[None for y in range(size)] for x in range(size)]", "def load_data(self):\n @Logger.runtime\n def process_coords():\n \"\"\"\n The placement of locations on our minimap is crucial. Panda3D objects however have a coordinate range from\n -1 to 1 on all axis, meaning that if we read a coordinate of a location from some image processing software\n by hand, we have to transform those coordinates into coordinates Panda would understand. 
This function does\n just that.\n :return: Normalized coordinates of location coordinates.\n \"\"\"\n split_coords = row[\"map_coord\"].split(',')\n map_x, map_y = [int(i) for i in split_coords]\n map_x_normed = ((map_x*2) / self.MINIMAP_DIM) - 1\n map_y_normed = -(((map_y*2) / self.MINIMAP_DIM) - 1)\n return map_x_normed, map_y_normed\n\n @Logger.runtime\n def process_texture():\n texture_path = Path(\"resource/textures/{}\".format(row[\"texture\"]))\n texture = self.loader.loadTexture(texture_path)\n return texture\n\n # the cylinder is loaded here but it does not yet show up, until it's specifically asked to\n self.scene_3d_model = self.loader.loadModel(self.PATHS[\"3D_SCENE_MODEL\"])\n\n try:\n with open(self.PATHS[\"LOCATIONS_DB\"], \"r\") as l_file:\n data = csv.DictReader(l_file, delimiter=\"|\")\n for row in data:\n id = int(row[\"id\"])\n x, y = process_coords()\n neighbors = [int(neighbor_id) for neighbor_id in row[\"neighbors\"].split(',')]\n texture = process_texture()\n location = Location(id, x, y, neighbors, texture)\n location.reparentTo(self.render2d)\n self.locations.append(location)\n Logger.log_info('The locations_db has been loaded')\n except:\n Logger.error('{} file not found!'.format(self.PATHS[\"LOCATIONS_DB\"]))\n\n self.active_location = self.locations[0]", "def load(self):\n\n if self.loaded:\n return\n\n self.region_back = None\n self.objects = []\n self.plants = []\n self.tiles = []\n\n # Some convenience vars\n materials = self.data.materials\n matmods = self.data.matmods\n objects = self.data.objects\n plants = self.data.plants\n world = self.world\n self.loaded = True\n\n # Get tiles\n try:\n data_tiles = world.get_tiles(self.rx, self.ry)\n except KeyError:\n print('WARNING: Region ({}, {}) was not found in world'.format(self.rx, self.ry))\n return\n\n # \"real\" coordinates\n base_x = self.rx*32\n gui_x = base_x*8\n base_y = self.ry*32\n gui_y = (world.height*8)-(base_y*8)\n\n # Background for our drawn area (black)\n self.region_back = self.scene.addRect(gui_x, gui_y-255, 255, 255,\n QtGui.QPen(QtGui.QColor(0, 0, 0)),\n QtGui.QBrush(QtGui.QColor(0, 0, 0)),\n )\n self.region_back.setZValue(Constants.z_black)\n\n # Tiles!\n cur_row = 0\n cur_col = 0\n for data_tile in data_tiles:\n self.tiles.append(GUITile(self.scene, data_tile,\n base_x+cur_col, base_y+cur_row,\n self,\n gui_x+cur_col*8, gui_y-(cur_row+1)*8,\n self.layer_toggles))\n self.scene.addItem(self.tiles[-1])\n cur_col += 1\n if cur_col == 32:\n cur_col = 0\n cur_row += 1\n\n # Entities!\n entities = []\n try:\n entities = world.get_entities(self.rx, self.ry)\n except KeyError:\n pass\n\n for e in entities:\n if e.name == 'ObjectEntity':\n obj_name = e.data['name']\n obj_orientation = e.data['orientationIndex']\n (obj_x, obj_y) = tuple(e.data['tilePosition'])\n if obj_name in objects:\n obj = objects[obj_name]\n (image, offset_x, offset_y) = obj.get_image(obj_orientation)\n qpmi = QtWidgets.QGraphicsPixmapItem(image)\n qpmi.setPos(\n (obj_x*8) + offset_x,\n (world.height*8)-(obj_y*8) - offset_y - image.height(),\n )\n qpmi.setZValue(Constants.z_objects)\n if not self.layer_toggles.objects_toggle.isChecked():\n qpmi.setVisible(False)\n self.scene.addItem(qpmi)\n self.objects.append(qpmi)\n rel_x = obj_x - base_x\n rel_y = obj_y - base_y\n tile_idx = rel_y*32 + rel_x\n self.tiles[tile_idx].add_object(obj, obj_name, obj_orientation, qpmi, e.data)\n elif e.name == 'PlantEntity':\n desc = e.data['descriptions']['description']\n images = []\n (obj_x, obj_y) = tuple(e.data['tilePosition'])\n for piece in 
e.data['pieces']:\n piece_img = piece['image'].split('?')[0]\n if piece_img in plants:\n img = plants[piece_img].image\n qpmi = QtWidgets.QGraphicsPixmapItem(img)\n qpmi.setPos(\n (obj_x*8) + (piece['offset'][0]*8),\n (world.height*8)-(obj_y*8) - (piece['offset'][1]*8) - img.height(),\n )\n qpmi.setZValue(Constants.z_plants)\n if not self.layer_toggles.plants_toggle.isChecked():\n qpmi.setVisible(False)\n images.append((plants[piece_img], qpmi))\n self.scene.addItem(qpmi)\n self.plants.append(qpmi)\n else:\n print('not found: {}'.format(piece_img))\n rel_x = obj_x - base_x\n rel_y = obj_y - base_y\n tile_idx = rel_y*32 + rel_x\n self.tiles[tile_idx].add_plant(desc, images)\n elif (e.name == 'MonsterEntity'\n or e.name == 'NpcEntity'\n or e.name == 'StagehandEntity'\n or e.name == 'ItemDropEntity'\n or e.name == 'VehicleEntity'\n ):\n # TODO: Ignoring for now\n pass\n else:\n print('Unknown entity type: {}'.format(e.name))", "def read_grid(self, file_path=None):\n print('[info] reading the grid ...')\n if not file_path:\n file_path = os.path.join(self.directory, 'grid.dat')\n if not os.path.exists(file_path):\n file_path = os.path.join(self.directory, 'grid.txt')\n # test if file written in binary format\n textchars = bytearray({7, 8, 9, 10, 12, 13, 27}\n | set(range(0x20, 0x100)) - {0x7f})\n is_binary_string = lambda bytes: bool(bytes.translate(None, textchars))\n infile = open(file_path, 'rb')\n binary_format = is_binary_string(infile.read(1024))\n infile.close()\n if binary_format:\n with open(file_path, 'rb') as infile:\n # x-direction\n nx = struct.unpack('i', infile.read(4))[0]\n x = numpy.array(struct.unpack('d' * (nx + 1),\n infile.read(8 * (nx + 1))))\n # y-direction\n ny = struct.unpack('i', infile.read(4))[0]\n y = numpy.array(struct.unpack('d' * (ny + 1),\n infile.read(8 * (ny + 1))))\n self.grid = numpy.array([x, y])\n else:\n with open(file_path, 'r') as infile:\n n_cells = numpy.array([int(n)\n for n in infile.readline().strip().split()])\n coords = numpy.loadtxt(infile, dtype=numpy.float64)\n self.grid = numpy.array(numpy.split(coords,\n numpy.cumsum(n_cells[:-1] + 1)))\n if self.grid.size == 2:\n print('\\tgrid-size: {}x{}'.format(self.grid[0].size - 1,\n self.grid[1].size - 1))\n elif self.grid.size == 3:\n print('\\tgrid-size: {}x{}x{}'.format(self.grid[0].size - 1,\n self.grid[1].size - 1,\n self.grid[2].size - 1))", "def load_particle_ic(self, file_name):\n \n\n data = np.genfromtxt(file_name, names = True)\n\n self.N_part = np.size(data['x'])\n\n self.pos = np.array([data['x'], data['y'], data['z']])\n self.pos = self.pos.T.reshape(self.N_part,3)\n self.vel = np.array([data['vx'], data['vy'], data['vz']])\n self.vel = self.vel.T.reshape(self.N_part,3)\n \n self.M_part = data['m'][0] # assuming all particles have same mass\n\n _my_print('loaded %6i particles from '%(self.N_part) + file_name)\n return", "def __init__(self, fname):\n f = zopen(fname, \"rt\")\n\n # skip header lines\n for i in range(2):\n f.readline()\n\n # number of atoms included in the file followed by the position of the origin of the volumetric data\n line = f.readline().split()\n self.natoms = int(line[0])\n self.origin = np.array(np.array(list(map(float, line[1:]))))\n\n # The next three lines give the number of voxels along each axis (x, y, z) followed by the axis vector.\n line = f.readline().split()\n self.NX = int(line[0])\n self.X = np.array([bohr_to_angstrom * float(l) for l in line[1:]])\n\n line = f.readline().split()\n self.NY = int(line[0])\n self.Y = np.array([bohr_to_angstrom * float(l) 
for l in line[1:]])\n\n line = f.readline().split()\n self.NZ = int(line[0])\n self.Z = np.array([bohr_to_angstrom * float(l) for l in line[1:]])\n\n self.voxelVolume = abs(np.dot(np.cross(self.X, self.Y), self.Z))\n self.volume = abs(np.dot(np.cross(self.X.dot(self.NZ), self.Y.dot(self.NY)), self.Z.dot(self.NZ)))\n\n # The last section in the header is one line for each atom consisting of 5 numbers,\n # the first is the atom number, second is charge, the last three are the x,y,z coordinates of the atom center.\n self.sites = []\n for i in range(self.natoms):\n line = f.readline().split()\n self.sites.append(Site(line[0], np.multiply(bohr_to_angstrom, list(map(float, line[2:])))))\n\n self.structure = Structure(\n lattice=[self.X * self.NX, self.Y * self.NY, self.Z * self.NZ],\n species=[s.specie for s in self.sites],\n coords=[s.coords for s in self.sites],\n coords_are_cartesian=True,\n )\n\n # Volumetric data\n self.data = np.zeros((self.NX, self.NY, self.NZ))\n i = 0\n for s in f:\n for v in s.split():\n self.data[\n int(i / (self.NY * self.NZ)),\n int((i / self.NZ) % self.NY),\n int(i % self.NZ),\n ] = float(v)\n i += 1", "def test_create_from_file(self):\n # TODO: Expand test to both openeye and rdkit toolkits\n filename = get_data_file_path(\"molecules/toluene.mol2\")\n\n molecule1 = Molecule(filename, allow_undefined_stereo=True)\n with open(filename, \"r\") as infile:\n molecule2 = Molecule(\n infile, file_format=\"MOL2\", allow_undefined_stereo=True\n )\n assert molecule1 == molecule2\n\n import gzip\n\n with gzip.GzipFile(filename + \".gz\", \"r\") as infile:\n molecule3 = Molecule(\n infile, file_format=\"MOL2\", allow_undefined_stereo=True\n )\n assert molecule3 == molecule1\n\n # Ensure that attempting to initialize a single Molecule from a file\n # containing multiple molecules raises a ValueError\n filename = get_data_file_path(\"molecules/butane_multi.sdf\")\n\n with pytest.raises(\n ValueError,\n match=\"Specified file or file-like.*exactly one molecule\",\n ):\n Molecule(filename, allow_undefined_stereo=True)", "def read_xyz(filename):\n\n config = {}\n\n with open(filename, 'r') as f:\n # number of atoms (spins)\n config['nat'] = int(re.findall('\\S+', f.readline())[0])\n\n # box parameters (type, dimension, shape, periodicity)\n sarr = re.findall('\\S+', f.readline())\n config['latt_type'] = sarr[0]\n dims = list(map(int, sarr[1:4]))\n config['latt_box'] = np.array(dims)\n config['box'] = np.diag(dims)\n config['pbc'] = list(map(int, sarr[4:7]))\n if len(sarr) > 7:\n dim_intra = len(sarr) - 7\n\n atom_types = []\n xyz = []\n config['latt_i'] = np.zeros(dims, dtype=int)\n config['latt_atoms'] = np.zeros(dims, dtype=int)\n config['latt_intra'] = np.zeros(tuple(dims) + (dim_intra,), dtype='float64')\n for i in range(config['nat']):\n sarr = re.findall('\\S+', f.readline())\n t = int(sarr[0])\n r = tuple(map(int, sarr[1:4]))\n\n atom_types.append(t)\n xyz.append(r)\n\n config['latt_i'][r] = i\n config['latt_atoms'][r] = t\n\n for j in range(dim_intra):\n ci = float(sarr[4 + j])\n config['latt_intra'][r[0], r[1], r[2], j] = ci\n\n config['atom_types'] = np.array(atom_types)\n config['xyz'] = np.array(xyz)\n \n return config", "def create_object(self, confM2R, grd_filename):\n ds = xr.open_dataset(grd_filename)\n\n if self.type == 'FORCINGDATA':\n\n logging.info(\"[M2R_grd] ---> Assuming {} grid type for {}\".format(confM2R.grd_type, self.type))\n logging.info(\"[M2R_grd] ---> Using dimension names {} and {} and {}\".format(confM2R.lon_name,\n confM2R.lat_name,\n 
confM2R.depth_name))\n\n self.lon = ds[str(confM2R.lon_name)][:]\n self.lat = ds[str(confM2R.lat_name)][:]\n self.h = ds[str(confM2R.depth_name)][:]\n self.nlevels = len(self.h)\n self.fillval = -9.99e+33\n self.hc = None\n\n if self.lon.ndim == 1:\n self.lon, self.lat = np.meshgrid(self.lon, self.lat)\n\n # Create grid for ESMF interpolation\n\n self.esmfgrid = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True, coord_names=[str(confM2R.lon_name), str(confM2R.lat_name)],\n add_mask=False)\n self.esmfgrid_u = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=[str(confM2R.lon_name_u), str(confM2R.lat_name_u)],\n add_mask=False)\n self.esmfgrid_v = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=[str(confM2R.lon_name_v), str(confM2R.lat_name_v)],\n add_mask=False)\n\n if confM2R.ocean_indata_type == 'SODA3':\n self.fillval = -1.e+20\n if confM2R.ocean_indata_type == 'SODA3_5DAY':\n self.fillval = -1.e+20\n if confM2R.ocean_indata_type == 'GLORYS':\n self.fillval = 9.96921e+36\n\n if confM2R.ocean_indata_type == 'NORESM':\n # self.h = ds[\"depth\"][:]\n self.h = np.asarray([0, 5, 10, 15, 20, 25, 30, 40, 50, 62.5, 75, 87.5, 100, 112.5, 125,\n 137.5, 150, 175, 200, 225, 250, 275, 300, 350, 400, 450, 500, 550, 600,\n 650, 700, 750, 800, 850, 900, 950, 1000, 1050, 1100, 1150, 1200, 1250,\n 1300, 1350, 1400, 1450, 1500, 1625, 1750, 1875, 2000, 2250, 2500, 2750,\n 3000, 3250, 3500, 3750, 4000, 4250, 4500, 4750, 5000, 5250, 5500, 5750,\n 6000, 6250, 6500, 6750])\n self.fillval = 32768\n self.nlevels = len(self.h)\n\n IOverticalGrid.get_z_levels(self)\n\n if self.type == 'STATION':\n self.lon = ds[confM2R.lon_name][:]\n self.lat = ds[confM2R.lat_name][:]\n self.h = ds[confM2R.depth_name][:]\n self.time = ds[confM2R.time_name][:]\n\n self.Lp = 1\n self.Mp = 1\n self.fillval = -9.99e+33\n\n if self.type in ['ROMS']:\n\n self.write_clim = True\n self.write_bry = True\n self.write_init = True\n self.write_stations = False\n\n self.lonname = 'lon_rho'\n self.latname = 'lat_rho'\n\n \"\"\"\n Set initTime to 1 if you dont want the first time-step to be\n the initial field (no ubar and vbar if time=0)\n \"\"\"\n\n self.inittime = 0\n self.ocean_time = 0\n self.NT = 2\n self.tracer = self.NT\n\n self.message = None # Used to store the date for printing to screen (IOwrite.py)\n self.time = 0\n self.reftime = 0\n self.grdtype = 'regular'\n self.mask_rho = ds[\"mask_rho\"][:, :]\n self.lon_rho = ds[\"lon_rho\"][:, :]\n self.lat_rho = ds[\"lat_rho\"][:, :]\n self.h = ds[\"h\"][:, :]\n\n masked_h = np.where(self.h > 0, self.h, self.h.max())\n\n self.hmin = masked_h.min()\n if \"Vtransform\" in ds.variables:\n self.vtransform = ds[\"Vtransform\"].values\n else:\n self.vtransform = confM2R.vtransform\n\n if \"s_rho\" in ds.variables:\n self.s_rho = ds[\"s_rho\"].values\n self.nlevels = len(self.s_rho)\n else:\n self.nlevels = confM2R.nlevels\n\n if \"Vstretching\" in ds.variables:\n self.vstretching = ds[\"Vstretching\"].values\n if \"theta_s\" in ds.variables:\n self.theta_s = ds[\"theta_s\"].values\n else:\n self.theta_s = confM2R.theta_s\n if \"theta_b\" in ds.variables:\n self.theta_b = ds[\"theta_b\"].values\n else:\n self.theta_b = confM2R.theta_b\n if \"Tcline\" in ds.variables:\n self.tcline = ds[\"Tcline\"].values\n else:\n self.tcline = confM2R.tcline\n if \"hc\" in ds.variables:\n self.hc = ds[\"hc\"].values\n else:\n self.hc = confM2R.hc\n\n if self.vtransform == 1:\n 
self.hc = min(self.hmin, self.tcline)\n self.hc = self.tcline\n if self.tcline > self.hmin:\n print('Vertical transformation parameters are not defined correctly in either gridid.txt '\n 'or in the history files: \\n Tc\\\n line = %d and hmin = %d. \\n You need to make sure that '\n 'tcline <= hmin when using transformation 1.' % (\n self.tcline, self.hmin))\n else:\n self.hc = self.tcline\n\n zeta = None\n if zeta is None:\n self.zeta = np.zeros(self.h.shape)\n else:\n self.zeta = zeta\n\n # for findvar in ds:\n # if findvar==\"hraw\":\n # self.hraw = ds[\"hraw\"][:,:,:]\n\n self.lon_u = ds[\"lon_u\"][:, :]\n self.lat_u = ds[\"lat_u\"][:, :]\n self.mask_u = ds[\"mask_u\"][:, :]\n for findvar in ds:\n if findvar == \"lon_vert\":\n self.lon_vert = ds[\"lon_vert\"][:, :]\n self.lat_vert = ds[\"lat_vert\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_rho\":\n self.x_rho = ds[\"x_rho\"][:, :]\n self.y_rho = ds[\"y_rho\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_u\":\n self.x_u = ds[\"x_u\"][:, :]\n self.y_u = ds[\"y_u\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_v\":\n self.x_v = ds[\"x_v\"][:, :]\n self.y_v = ds[\"y_v\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_psi\":\n self.x_psi = ds[\"x_psi\"][:, :]\n self.y_psi = ds[\"y_psi\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_vert\":\n self.x_vert = ds[\"x_vert\"][:, :]\n self.y_vert = ds[\"y_vert\"][:, :]\n\n for findvar in ds:\n if findvar == \"xl\":\n self.xl = ds[\"xl\"]\n self.el = ds[\"el\"]\n\n for findvar in ds:\n if findvar == \"dmde\":\n self.dmde = ds[\"dmde\"][:, :]\n self.dndx = ds[\"dndx\"][:, :]\n\n self.lon_v = ds[\"lon_v\"][:, :]\n self.lat_v = ds[\"lat_v\"][:, :]\n self.mask_v = ds[\"mask_v\"][:, :]\n\n # self.spherical = ds[\"spherical\"][:]\n\n self.lon_psi = self.lon_u[:-1, :]\n self.lat_psi = self.lat_v[:, :-1]\n self.mask_psi = self.mask_v[:, :-1]\n\n # self.f = ds[\"f\"][:, :]\n self.angle = ds[\"angle\"][:, :]\n\n self.pm = ds[\"pm\"][:, :]\n self.invpm = 1.0 / np.asarray(ds[\"pm\"][:, :])\n self.pn = ds[\"pn\"][:, :]\n self.invpn = 1.0 / np.asarray(ds[\"pn\"][:, :])\n\n self.Lp = len(self.lat_rho[1, :])\n self.Mp = len(self.lat_rho[:, 1])\n\n self.fillval = -9.99e33\n\n self.eta_rho = self.Mp\n self.eta_u = self.Mp\n self.eta_v = self.Mp - 1\n self.eta_psi = self.Mp - 1\n self.xi_rho = self.Lp\n self.xi_u = self.Lp - 1\n self.xi_v = self.Lp\n self.xi_psi = self.Lp - 1\n\n # Boolean to check if we need to initialize the CLIM file before writing\n self.ioClimInitialized = False\n self.ioInitInitialized = False\n\n if self.lon_rho.ndim == 1:\n self.lon_rho, self.lat_rho = np.meshgrid(self.lon_rho, self.lat_rho)\n self.lon_u, self.lat_u = np.meshgrid(self.lon_u, self.lat_u)\n self.lon_v, self.lat_v = np.meshgrid(self.lon_v, self.lat_v)\n\n # Setup the vertical coordinate system\n IOverticalGrid.calculateVgrid(self)\n\n self.esmfgrid_u = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n coord_names=['lon_u', 'lat_u'],\n is_sphere=True,\n add_mask=False)\n self.esmfgrid_v = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=['lon_v', 'lat_v'],\n add_mask=False)\n self.esmfgrid = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=[self.lonname, self.latname],\n add_mask=False)", "def read_xyz(self, filename):\n # first line contains number of atoms\n self.numatom = int(filename.readline().split()[0])\n # second line contains a comment\n self.comment = filename.readline()[:-3]\n 
# rest of the lines contain coordinates structured Element X Y Z\n string = \"Element X Y Z \\n\" + filename.read()\n self.contents = pd.read_table(StringIO(string), sep=r'\\s+')", "def _read_dx(self, FN):\n if FN.endswith('.dx'):\n F = open(FN, 'r')\n else:\n import gzip\n F = gzip.open(FN, 'r')\n\n # Read the header\n line = F.readline()\n while line.find('object') == -1:\n line = F.readline()\n header = {}\n header['counts'] = [int(x) for x in line.split(' ')[-3:]]\n for name in ['origin', 'd0', 'd1', 'd2']:\n header[name] = [float(x) for x in F.readline().split(' ')[-3:]]\n F.readline()\n header['npts'] = int(F.readline().split(' ')[-3])\n\n # Test to make sure the grid type is okay.\n # These conditions are not absolultely essential,\n # but they reduce the number of subtraction operations.\n if not (header['d0'][1] == 0 and header['d0'][2] == 0\n and header['d1'][0] == 0 and header['d1'][2] == 0\n and header['d2'][0] == 0 and header['d2'][1] == 0):\n raise Exception('Trilinear grid must be in original basis')\n if not (header['d0'][0] > 0 and header['d1'][1] > 0\n and header['d2'][2] > 0):\n raise Exception('Trilinear grid must have positive coordinates')\n\n # Read the data\n vals = np.ndarray(shape=header['npts'], dtype=float)\n index = 0\n while index < header['npts']:\n line = F.readline()[:-1]\n items = [float(item) for item in line.split()]\n vals[index:index + len(items)] = items\n index = index + len(items)\n F.close()\n\n data = {\n 'origin':np.array(header['origin']), \\\n 'spacing':np.array([header['d0'][0],header['d1'][1],header['d2'][2]]), \\\n 'counts':np.array(header['counts']), \\\n 'vals':vals}\n return data", "def openTMX(self, fn):\n\n #parse the TMX XML markup\n tree = ET.parse(fn)\n root = tree.getroot()\n self.size = int(root.attrib[\"width\"]), int(root.attrib[\"height\"])\n\n #find the offset at which the collision and behaviour layers tile data is stored\n collisionTilesetOffset = None\n behaviourTilesetOffset = None\n for ts in root.findall(\"tileset\"):\n if ts.attrib[\"name\"] == \"collision\":\n collisionTilesetOffset = int(ts.attrib[\"firstgid\"])-1\n elif ts.attrib[\"name\"] == \"behaviour\":\n behaviourTilesetOffset = int(ts.attrib[\"firstgid\"])-1\n if collisionTilesetOffset is None:\n raise error.DittoInvalidResourceException(fn, \"Collision tileset\")\n if behaviourTilesetOffset is None:\n raise error.DittoInvalidResourceException(fn, \"Behaviour tileset\")\n\n #create each layer, separating the collision and behaviour data\n self.layers = []\n self.collisionLayer = None\n self.behaviourLayer = None\n for layer in root.findall(\"layer\"):\n l = Layer()\n l.openTMXNode(layer)\n if l.level == -1: #collision layer indicated by level == -1\n self.collisionLayer = l\n elif l.level == -2:\n self.behaviourLayer = l\n else:\n self.layers.append(l)\n if self.collisionLayer is None:\n raise error.DittoInvalidResourceException(fn, \"Collision data layer\")\n if self.behaviourLayer is None:\n raise error.DittoInvalidResourceException(fn, \"Behaviour data layer\")\n\n #compensate for tilesets not starting at 1\n self.collisionLayer.offsetElements(collisionTilesetOffset)\n self.behaviourLayer.offsetElements(behaviourTilesetOffset)", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def load_raster_xyz(self, filename):\n with 
rasterio.open(filename, 'r') as src:\n ## Alias 'affine' no longer works for 'transform'\n ##matrix = src.affine\n matrix = src.transform\n self.size = (src.width, src.height)\n # read per scan line\n for row in range(0, src.height):\n window = ((row, row+1), (0, src.width))\n data = src.read(window=window)\n this_row = data[0][0]\n for column in range(0, src.width):\n x, y = matrix * (column, row)\n yield x, y, this_row[column]", "def load_ref_system():\n return psr.make_system(\"\"\"\n H 1.5205 -0.1372 2.5286\n C 0.9575 -0.0905 1.5914\n C -0.4298 -0.1902 1.6060\n H -0.9578 -0.3156 2.5570\n C -1.1520 -0.1316 0.4215\n H -2.2452 -0.2104 0.4492\n C -0.4779 0.0324 -0.7969\n N -1.2191 0.2008 -2.0081\n H -2.0974 -0.2669 -1.9681\n H -0.6944 -0.0913 -2.8025\n C 0.9208 0.1292 -0.8109\n H 1.4628 0.2560 -1.7555\n C 1.6275 0.0685 0.3828\n H 2.7196 0.1470 0.3709\n \"\"\")", "def load(template):\n with open(template) as f:\n return f.read()", "def load_templates(fwhm=400, line_complexes=True, stars=False,\n full_line_list=None, continuum_list=None,\n fsps_templates=False, alf_template=False):\n \n if stars:\n # templates = glob.glob('%s/templates/Pickles_stars/ext/*dat' %(os.getenv('GRIZLI')))\n # templates = []\n # for t in 'obafgkmrw':\n # templates.extend( glob.glob('%s/templates/Pickles_stars/ext/uk%s*dat' %(os.getenv('THREEDHST'), t)))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-M*txt' %(os.getenv('THREEDHST'))))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-[LT]*txt' %(os.getenv('THREEDHST'))))\n # \n # #templates = glob.glob('/Users/brammer/Downloads/templates/spex*txt')\n # templates = glob.glob('bpgs/*ascii')\n # info = catIO.Table('bpgs/bpgs.info')\n # type = np.array([t[:2] for t in info['type']])\n # templates = []\n # for t in 'OBAFGKM':\n # test = type == '-%s' %(t)\n # so = np.argsort(info['type'][test])\n # templates.extend(info['file'][test][so])\n # \n # temp_list = OrderedDict()\n # for temp in templates:\n # #data = np.loadtxt('bpgs/'+temp, unpack=True)\n # data = np.loadtxt(temp, unpack=True)\n # #data[0] *= 1.e4 # spex\n # scl = np.interp(5500., data[0], data[1])\n # name = os.path.basename(temp)\n # #ix = info['file'] == temp\n # #name='%5s %s' %(info['type'][ix][0][1:], temp.split('.as')[0])\n # print(name)\n # temp_list[name] = utils.SpectrumTemplate(wave=data[0],\n # flux=data[1]/scl)\n \n # np.save('stars_bpgs.npy', [temp_list])\n \n \n # tall = np.load(os.path.join(os.getenv('GRIZLI'), \n # 'templates/stars.npy'))[0]\n # \n # return tall\n # \n # temp_list = OrderedDict()\n # for k in tall:\n # if k.startswith('uk'):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n # \n # for t in 'MLT':\n # for k in tall:\n # if k.startswith('spex-prism-'+t):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n \n #return temp_list\n templates = ['M6.5.txt', 'M8.0.txt', 'L1.0.txt', 'L3.5.txt', 'L6.0.txt', 'T2.0.txt', 'T6.0.txt', 'T7.5.txt']\n templates = ['stars/'+t for t in templates]\n else:\n ## Intermediate and very old\n # templates = ['templates/EAZY_v1.0_lines/eazy_v1.0_sed3_nolines.dat', \n # 'templates/cvd12_t11_solar_Chabrier.extend.skip10.dat'] \n templates = ['eazy_intermediate.dat', \n 'cvd12_t11_solar_Chabrier.dat']\n \n ## Post starburst\n #templates.append('templates/UltraVISTA/eazy_v1.1_sed9.dat')\n templates.append('post_starburst.dat')\n \n ## Very blue continuum\n #templates.append('templates/YoungSB/erb2010_continuum.dat')\n templates.append('erb2010_continuum.dat')\n \n ### Test new templates\n # templates = 
['templates/erb2010_continuum.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_006.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_008.dat']\n \n if fsps_templates:\n #templates = ['templates/fsps/tweak_fsps_temp_kc13_12_0{0:02d}.dat'.format(i+1) for i in range(12)]\n templates = ['fsps/fsps_QSF_12_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(12)]\n #templates = ['fsps/fsps_QSF_7_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(7)]\n \n \n if alf_template:\n templates.append('alf_SSP.dat')\n \n if continuum_list is not None:\n templates = continuum_list\n \n temp_list = OrderedDict()\n for temp in templates:\n data = np.loadtxt(os.path.join(os.getenv('GRIZLI'), 'templates', temp), unpack=True)\n #scl = np.interp(5500., data[0], data[1])\n scl = 1.\n name = temp #os.path.basename(temp)\n temp_list[name] = SpectrumTemplate(wave=data[0], flux=data[1]/scl,\n name=name)\n \n temp_list[name].name = name\n \n if stars:\n return temp_list\n \n ### Emission lines:\n line_wavelengths, line_ratios = get_line_wavelengths()\n \n if line_complexes:\n #line_list = ['Ha+SII', 'OIII+Hb+Ha', 'OII']\n #line_list = ['Ha+SII', 'OIII+Hb', 'OII']\n line_list = ['Ha+NII+SII+SIII+He', 'OIII+Hb', 'OII+Ne', 'Lya+CIV']\n else:\n if full_line_list is None:\n line_list = DEFAULT_LINE_LIST\n else:\n line_list = full_line_list\n \n #line_list = ['Ha', 'SII']\n \n # Use FSPS grid for lines\n wave_grid = None\n # if fsps_templates:\n # wave_grid = data[0]\n # else:\n # wave_grid = None \n \n for li in line_list:\n scl = line_ratios[li]/np.sum(line_ratios[li])\n for i in range(len(scl)):\n line_i = SpectrumTemplate(wave=wave_grid, \n central_wave=line_wavelengths[li][i], \n flux=None, fwhm=fwhm, velocity=True)\n \n if i == 0:\n line_temp = line_i*scl[i]\n else:\n line_temp = line_temp + line_i*scl[i]\n \n name = 'line {0}'.format(li)\n line_temp.name = name\n temp_list[name] = line_temp\n \n return temp_list", "def test_ttd2(self):\n filename = str(self.temp_j2k_filename)\n\n xtx2_setup(filename)\n\n kwargs = {'x0': 0,\n 'y0': 0,\n 'x1': 128,\n 'y1': 128,\n 'filename': filename,\n 'codec_format': openjp2.CODEC_JP2}\n tile_decoder(**kwargs)\n self.assertTrue(True)", "def genLattice(structure,in_network,dim,supercell,prec=1E-4,\n seed_index=0,c_mag=60,y_dist=-1):\n\n # Generate vectors in plane/line, relative to\n # the first atom in the network of atoms\n \n if y_dist==-1:\n y_dist=c_mag/3\n \n new = [x for x in in_network if abs(x[2])<np.pi/2]\n return_structure=False\n mat = np.array(structure.lattice.as_dict()['matrix'])\n coords = np.array([np.dot(mat.T,x.frac_coords%1) for x in structure.sites])\n specs = structure.species\n ref_ele_d = getUniqueCount(specs)\n for i in ref_ele_d:\n ref_ele_d[i]/=(supercell**dim)\n coords = coords-coords[seed_index]\n \n\n\n\n\n for lat_vectors in sorted(new,key=itemgetter(3)):\n\n # Create lattice matrix to fit atomic coordinates against\n # In 2D\n if dim==2:\n new_c = np.cross(lat_vectors[0],lat_vectors[1])\n scale_c = c_mag/magni(new_c)\n\n latt_attempt = np.array([lat_vectors[0],lat_vectors[1],\\\n new_c*scale_c])\n \n # In 1D\n elif dim==1:\n unitV = lat_vectors[0]/magni(lat_vectors[0])\n if unitV[0]==0:\n perp1 = [1,0,0]\n elif unitV[1]==0:\n perp1 = [0,1,0]\n elif unitV[2]==0:\n perp1 = [0,0,1]\n else:\n perp1 = [1,1,-1*(unitV[0]+unitV[1])/unitV[2]]\n perp1 = perp1/np.linalg.norm(perp1)*c_mag\n perp2 = np.cross(unitV,perp1)\n perp2 = perp2/np.linalg.norm(perp2)*c_mag\n latt_attempt = np.array([lat_vectors[0],perp1,perp2])\n \n # Fit atomic sites to new lattice\n 
temp_fracs = np.linalg.solve(latt_attempt.T,np.array(coords).T)\n \n \n\n # Make list of all fractional positions, ignoring\n # which axis\n new_fracs = list([list(x) for x in temp_fracs.T])\n\n if len([x for x in np.array(new_fracs).T if \n np.all([(y>=0 and y<1) for y in np.around(x[:dim],3)]) and\n np.all([(y>=-y_dist/c_mag and y<y_dist/c_mag) for \n y in np.around(x[dim:],3)])])==len(new_fracs[0])/supercell**dim:\n \n fit_fracs=[]\n new_fracs_t = np.around(new_fracs.T,6)\n for i in range(len(new_fracs[0])):\n if np.all([(y>=0 and y<1) for y in np.around(new_fracs_t[i][:dim],3)]) \\\n and np.all([(y>=-y_dist/c_mag and y<y_dist/c_mag) \n for y in np.around(new_fracs_t[i][dim:],3)]):\n fit_fracs.append([new_fracs_t[i],specs[i]])\n fit_fracs = np.array(fit_fracs).T\n new_ele_d = getUniqueCount(fit_fracs[1])\n unequal=False\n for k in new_ele_d:\n if new_ele_d[k]!=ref_ele_d[k]:\n unequal=True\n\n break\n if not unequal:\n\n return_structure=True\n break\n\n\n\n # If match found\n if return_structure:\n return(np.array(latt_attempt),fit_fracs)\n # If no match found\n else:\n return([],[])", "def heat_map(path):\r\n x, y= np.loadtxt(fname=path, delimiter='\\t',dtype=int,\r\n usecols = (1,2), skiprows=100, unpack = True)\r\n\r\n fig, (ax,ax2) = plt.subplots(nrows=2, sharex=True, figsize=(20,10))\r\n\r\n extent = [x[0]-(x[1]-x[0])/2, x[-1]+(x[1]-x[0])/2,0,1]\r\n ax.imshow(y[np.newaxis,:], cmap=\"plasma\", aspect=\"auto\", extent=extent)\r\n ax2.plot(x,y)\r\n plt.tight_layout()\r\n return plt.show()", "def load_2D_netCDF(filename, var_name, lat_name, lon_name):\n data = Dataset(filename, 'r')\n var = data[var_name][:]\n lats = data[lat_name][:]\n lons = data[lon_name][:]\n data.close()\n return var, lats, lons", "def open_tile(filename):\n geoimg = gippy.GeoImage(filename, True)\n z, x, y = map(int, geoimg.basename().split('-')[0:4])\n tile = Tile.from_google(google_x=x, google_y=y, zoom=z)\n geoimg.set_srs('EPSG:3857')\n minpt = tile.bounds[0].meters\n maxpt = tile.bounds[1].meters\n affine = np.array(\n [\n minpt[0], (maxpt[0]-minpt[0])/geoimg.xsize(), 0.0,\n maxpt[1], 0.0, -(maxpt[1]-minpt[1])/geoimg.ysize()\n ])\n geoimg.set_affine(affine)\n geoimg.set_nodata(-1)\n return geoimg", "def __init__(self, column_pos, row_pos, type='1'):\n\t\tself.column_pos = column_pos\n\t\tself.row_pos = row_pos\n\t\tself.terrain_type = type", "def load_building_blocks(path):\t\t\n\t#TODO : automatization\n\tbenzene = Building_Block(abbrev=\"B\", num_atoms=6,origin=0, para_pos=3, para_angle=0, meta_pos=4 , meta_angle = -np.pi/3., ortho_pos=5, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/benzene.xyz\")\n\tnapthtalene = Building_Block(abbrev=\"N\", num_atoms=18,origin=0, para_pos=12, para_angle=0., meta_pos=11 , meta_angle = -np.pi/3., ortho_pos=10, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/naphtalene.xyz\")\n\tdbPc1 = Building_Block(abbrev=\"dbPc1\", num_atoms=32,origin=13, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = +np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc1_block.xyz\")\n\tdbPc4 = Building_Block(abbrev=\"dbPc4\", num_atoms=55,origin=22, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc4.xyz\")\n\tdbPc6 = Building_Block(abbrev=\"dbPc6\", num_atoms=52,origin=17, para_pos=0, para_angle=0, meta_pos=1 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, 
path=path+\"/dbPc6.xyz\")\n\tdbPc5 = Building_Block(abbrev=\"dbPc5\", num_atoms=58,origin=12, para_pos=26, para_angle=0, meta_pos=20 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc5.xyz\")\n\tpseudo_para_naph_PCP = Building_Block(abbrev=\"pseudo-para_naph_PCP\", num_atoms=44,origin=0, para_pos=18, para_angle=0, meta_pos=16 , meta_angle = -np.pi/3, ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/pseudo-para_naph_PCP.xyz\")\n\tline =Building_Block(abbrev=\"line\", num_atoms=4,origin=0, para_pos=1, para_angle=0, meta_pos=1 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/line.xyz\")\n\t#rot=Building_Block(abbrev=\"line\", num_atoms=47,origin=6, para_pos=16, para_angle=0, meta_pos=20 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/rot.xyz\")\n\t#stacked_anth=Building_Block(abbrev=\"stacked_anth\", num_atoms=62,origin=3, para_pos=22, para_angle=0, meta_pos=30 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/stacked_anth.xyz\")\n\t\n\tbuilding_blocks = [benzene,napthtalene,dbPc1,dbPc4,dbPc6, dbPc5,pseudo_para_naph_PCP, line]\n\n\treturn building_blocks", "def load_ref_system():\n return psr.make_system(\"\"\"\n C 0.9665 1.0824 0.5413\n C 1.2773 -0.4115 0.7732\n C 0.2828 -1.1346 -0.1576\n C -1.1303 -0.9646 0.4366\n C -1.4411 0.5293 0.2046\n C -0.1729 1.0557 -0.4978\n C 0.2273 -0.1635 -1.3572\n H 1.8453 1.6266 0.1530\n H 0.6690 1.6004 1.4697\n H 1.1499 -0.7100 1.8284\n H 2.3205 -0.6573 0.5074\n H 0.5550 -2.1799 -0.3874\n H -1.1674 -1.2423 1.5044\n H -1.8581 -1.6172 -0.0769\n H -1.6481 1.0681 1.1457\n H -2.3332 0.6664 -0.4315\n H -0.3162 2.0081 -1.0381\n H 1.1910 -0.0439 -1.8783\n H -0.5219 -0.4374 -2.1175\n \"\"\")", "def _vmd_script_molecule(mole, filename=\"molecule.xyz\"):\n output = \"# load new molecule\\n\"\n if len(mole.atom) == 0:\n raise ValueError(\"Need at least one molecule file with coordinates.\")\n atoms = mole.atom\n natoms = len(mole.atom[0:, 0])\n f = open(filename, \"w\")\n f.write(str(natoms) + \"\\n\\n\")\n for i in range(0, natoms):\n symb = str(atoms[i, 0])\n coord = \" \".join(map(str, atoms[i, 1].tolist()))\n f.write(symb + \" \" + coord + \"\\n\")\n f.close()\n output += (\n \"mol {0} {1} type {2} first 0 last -1 step 1 filebonds 1 autobonds 1 waitfor all\"\n \"\\n\".format(\"new\", filename, \"{xyz}\")\n )\n output += \"#\\n\" \"# representation of the atoms\\n\"\n output += \"mol representation CPK 1.000000 0.300000 118.000000 131.000000\\n\"\n output += (\n \"mol delrep 0 top\\n\"\n \"mol color Element\\n\"\n \"mol selection {{all}}\\n\"\n \"mol material Opaque\\n\"\n \"mol addrep top\\n\"\n \"#\\n\"\n )\n return output", "def create_unstructured_grid(coordinates_text_file, vectors_text_file) -> vtkUnstructuredGrid:\n\n # Set points as vtkPoints from coordinates numpy array\n points = vtkPoints()\n points.SetData(numpy_to_vtk(loadtxt(fname=coordinates_text_file), deep=True))\n\n # Set vectors as vtkDoubleArray from vectors numpy array\n vectors = numpy_to_vtk(loadtxt(fname=vectors_text_file), deep=True)\n\n # Set unstructured grid\n unstructured_grid = vtkUnstructuredGrid()\n unstructured_grid.SetPoints(points)\n unstructured_grid.GetPointData().SetVectors(vectors)\n\n return unstructured_grid", "def __init__(self, fn):\n\n #for the scripting engine\n script_engine.ScriptableObject.__init__(self)\n\n #store variables we'll 
need later\n self.fn = fn\n\n #get a script engine (singleton)\n self.scriptEngine = script_engine.ScriptEngine()\n\n #parse the XML file\n root = data.getTreeRoot(fn)\n self.music = os.path.join(settings.path, \"data\", data.getAttr(root, \"music\", data.D_STRING))\n\n #open the actual map data file to create the map tile data\n mapPath = os.path.join(settings.path, \"data\", data.getAttr(root, \"file\", data.D_STRING))\n self.openMap(mapPath)\n\n #create the tileset\n tilesetPath = os.path.join(settings.path, \"data\", data.getAttr(root, \"tileset\", data.D_STRING))\n self.tileset = tileset.Tileset(tilesetPath)\n\n #set the border tiles\n self.borderTiles = {}\n borderNode = data.getChild(root, \"border\")\n\n #set each border node with the correct tile indexes, subtracting 1 because the tileset starts at 1 not 0\n self.borderTiles[BD_NW] = data.getAttr(borderNode, \"nw\", data.D_INT)-1\n self.borderTiles[BD_NE] = data.getAttr(borderNode, \"ne\", data.D_INT)-1\n self.borderTiles[BD_SW] = data.getAttr(borderNode, \"sw\", data.D_INT)-1\n self.borderTiles[BD_SE] = data.getAttr(borderNode, \"se\", data.D_INT)-1\n \n #create any connections from the map\n #connected maps will not be loaded until the map becomes the main game map\n #connections are stored as {direction: (filename, offset)}\n self.connections = {}\n self.connectedMaps = {}\n for c in data.getChildren(root, \"connection\"):\n side = data.getAttr(c, \"side\", data.D_STRING)\n fp = os.path.join(settings.path, \"data\", data.getAttr(c, \"map\", data.D_STRING))\n offset = data.getAttr(c, \"offset\", data.D_INT)\n \n if side == \"left\":\n self.connections[sprite.DIR_LEFT] = (fp, offset)\n elif side == \"right\":\n self.connections[sprite.DIR_RIGHT] = (fp, offset)\n elif side == \"up\":\n self.connections[sprite.DIR_UP] = (fp, offset)\n elif side == \"down\":\n self.connections[sprite.DIR_DOWN] = (fp, offset)\n\n #create any NPCs, adding them to the sprite dictionary\n self.sprites = {}\n for n in data.getChildren(root, \"npc\"):\n spr = npc.NPC(n, self)\n self.sprites[spr.id] = spr\n\n #create a dictionary to hold positions reserved by moving sprites\n self.reservedPositions = {}\n\n #create script and warp events, adding them to the events dictionary\n #if a load script is defined, create it\n self.events = {}\n loadScript = None\n for s in data.getChildren(root, \"script\"):\n trigger = data.getAttr(s, \"trigger\", data.D_STRING)\n if trigger == \"load\":\n loadScript = script_engine.Script(s) \n else:\n position = tuple(data.getAttr(s, \"position\", data.D_INT2LIST)) \n self.events[position] = events.ScriptEvent(s, self)\n \n for w in root.findall(\"warp\"):\n position = tuple(data.getAttr(w, \"position\", data.D_INT2LIST))\n self.events[position] = events.Warp(w, self)\n\n #if there is a load script, run it\n if loadScript is not None:\n self.scriptEngine.run(loadScript, self)", "def load_ref_system():\n return psr.make_system(\"\"\"\n C -1.9986 0.5071 -0.0866\n C -0.8493 -0.4533 -0.4579\n O 0.0586 0.2088 -1.3721\n C 0.8663 1.2675 -0.8382\n C 1.5547 0.9669 0.4999\n C 0.8140 -0.0134 1.4353\n O -0.0108 0.8489 2.2397\n C -0.0933 -1.0605 0.7453\n O 0.7150 -2.1599 0.3631\n O 2.8279 0.4214 0.1262\n O -1.2994 -1.5385 -1.2296\n O -2.5404 1.0789 -1.2774\n H 0.2375 2.1724 -0.7574\n H 1.6284 1.3902 -1.6421\n H -0.8123 -1.5049 1.4752\n H 1.5482 -0.5527 2.0790\n H 1.7274 1.9236 1.0571\n H -1.6650 1.3036 0.6075\n H -2.8578 -0.0327 0.3562\n H -1.7906 -1.2070 -2.0296\n H 1.1049 -1.9987 -0.5388\n H -0.2495 0.4143 3.0835\n H 3.3870 0.2567 
0.9125\n H -1.8133 1.4740 -1.8186\n \"\"\")", "def get_xyz_coord(path):\r\n\tlabels = loadmat(path)\r\n\tanno_xyz = []\r\n\tfor index in range(0, 1500):\r\n\t\tanno_xyz.append([])\r\n\t\tfor i in range(0, 21):\r\n\t\t\tx = labels['handPara'][0][i][index]\r\n\t\t\ty = labels['handPara'][1][i][index]\r\n\t\t\tz = labels['handPara'][2][i][index]\r\n\t\t\tanno_xyz[-1].append([x, y, z])\r\n\tanno_xyz = np.array(anno_xyz)\r\n\t# anno_xyz = np.reshape(labels['handPara'], (1500, 21, 3))\r\n\treturn anno_xyz", "def __init__(self, x, y):\n self.height = x\n self.width = y\n self.grid = self.initialize(self.height, self.width)\n self.randx = random.randint(0, self.height-1)\n self.randy = random.randint(0, self.width-1)\n #self.make()\n #self.show()", "def saved_template(self, template_id):\n\n # From user params get the wanted type and size\n category, size = template_id.split('_')\n\n # Parse the xml file\n template_tree = Etree.parse(\"patron.xml\")\n root = template_tree.getroot()\n\n # Find The selected template\n for template in root.findall(\"./type[@name='%s']/template[@size='%s']\" % (category, size)):\n # Find useful data\n info = 'T-shirt_template_%s_%s' % (category, size)\n transform = template.find('transform')\n\n # Creation of a main group for the Template\n template_attribs = {\n inkex.addNS('label', 'inkscape'): info,\n 'transform': transform.text if transform is not None else ''\n }\n template_group = inkex.etree.SubElement(self.current_layer, 'g', template_attribs)\n\n # For each pieces of the template\n for piece in template.findall('piece'):\n # Find useful data\n pieceinfo = info + \"_\" + piece.find('name').text\n transform = piece.find('transform')\n\n # Create a group for the piece\n piece_attribs = {\n inkex.addNS('label', 'inkscape'): pieceinfo,\n 'transform': transform.text if transform is not None else ''\n }\n piece_group = inkex.etree.SubElement(template_group, 'g', piece_attribs)\n\n # Add a text to display the piece info\n add_text(piece_group, pieceinfo.replace('_', ' '), piece.find('info').text, 15)\n\n # For each paths of the piece\n for part in piece.findall('part'):\n # Find useful data\n label = part.find('name').text\n partinfo = pieceinfo + \"_\" + label\n transform = part.find('transform')\n\n # Create a group for the shape\n part_attribs = {\n inkex.addNS('label', 'inkscape'): partinfo,\n 'transform': transform.text if transform is not None else ''\n }\n part_group = inkex.etree.SubElement(piece_group, 'g', part_attribs)\n\n # Add the path to the group\n style = self.normal_line if self.options.style == 'print' or label != 'offset' else self.cut_line\n path_attribs = {\n inkex.addNS('label', 'inkscape'): partinfo,\n 'style': simplestyle.formatStyle(style),\n 'd': part.find('path').text\n }\n inkex.etree.SubElement(part_group, inkex.addNS('path', 'svg'), path_attribs)", "def populate(self, compound_dict=None, x=None, y=None, z=None):\n if self.dimension == 3:\n a = self.lattice_spacings[0]\n b = self.lattice_spacings[1]\n c = self.lattice_spacings[2]\n if x is None:\n x = 1\n if y is None:\n y = 1\n if z is None:\n z = 1\n if x < 1 or y < 1 or z < 1:\n raise ValueError('Incorrect populate value: X, Y, or Z is < 1.'\n ' Cannot replicate unit cell less than 1')\n elif self.dimension == 2:\n a = self.lattice_spacings[0]\n b = self.lattice_spacings[1]\n if x is None:\n x = 1\n if y is None:\n y = 1\n if z is None:\n pass\n else:\n raise ValueError('Z is defined although dimension is 2D')\n if x < 1 or y < 1:\n raise ValueError('Incorrect populate value: X or Y is 
< 1. '\n ' Cannot replicate unit cell less than 1')\n elif self.dimension == 1:\n a = self.lattice_spacings[0]\n if x is None:\n x = 1\n if y is None:\n pass\n else:\n raise ValueError('Y is defined although dimension is 1D')\n if z is None:\n pass\n if z is not None:\n raise ValueError('Z is defined although dimension is 2D')\n if x < 1:\n raise ValueError('Incorrect populate value: X < 1. '\n ' Cannot replicate unit cell less than 1')\n else:\n raise ValueError('Dimension not defined.')\n\n cell = defaultdict(list)\n for key, val in self.basis_vectors.items():\n for val_item in range(len(val)):\n if self.dimension == 3:\n for i in range(x):\n for j in range(y):\n for k in range(z):\n tmpx = (val[val_item][0] + i) * a\n tmpy = (val[val_item][1] + j) * b\n tmpz = (val[val_item][2] + k) * c\n tmp_tuple = tuple((tmpx, tmpy, tmpz))\n cell[key].append(((tmp_tuple)))\n elif self.dimension == 2:\n for i in range(x):\n for j in range(y):\n tmpx = (val[val_item][0] + i) * a\n tmpy = (val[val_item][1] + j) * b\n tmp_tuple = tuple((tmpx, tmpy))\n cell[key].append(((tmp_tuple)))\n else:\n for i in range(x):\n tmpx = (val[val_item][0] + i) * a\n tmp_tuple = tuple((tmpx))\n cell[key].append(((tmp_tuple)))\n\n ret_lattice = mb.Compound()\n if compound_dict is None:\n for key_id, all_pos in cell.items():\n particle = mb.Particle(name=key_id, pos=[0, 0, 0])\n for pos in all_pos:\n particle_to_add = mb.clone(particle)\n mb.translate(particle_to_add, list(pos))\n ret_lattice.add(particle_to_add)\n else:\n for key_id, all_pos in cell.items():\n if isinstance(compound_dict[key_id], mb.Compound):\n compound_to_move = compound_dict[key_id]\n for pos in all_pos:\n tmp_comp = mb.clone(compound_to_move)\n mb.translate(tmp_comp, list(pos))\n ret_lattice.add(tmp_comp)\n else:\n err_type = type(compound_dict.get(key_id))\n TypeError('Invalid type in provided Compound Dictionary. '\n 'For key {}, type: {} was provided, '\n 'not Compound.'.format(key_id, err_type))\n return ret_lattice", "def load_places():\n with open(\"resources/map.txt\", \"r\") as file:\n rows = file.readlines()\n x_max = len(rows[0].splt(\"\\t\")) # Assumes all rows contain the same number of tabs\n for y in range(len(rows)):\n cols = rows[y].splt(\"\\t\")\n for x in range(x_max):\n place_name = cols[x].replace(\"\\n\",\"\")\n if place_name == \"StartingRoom\":\n global starting_position\n starting_position = (x, y)\n if place_name == \"\":\n _world[(x, y)] = None #create a key to a dict, doesn't if cell is empty\n else:\n getattr(__import__(\"places\"), place_name)(x, y)\n \"\"\"reflect into places module, find class whose name matches place_name and\n passes the coordinates (x, y) to the constructor of the places\"\"\"\n\n \"\"\"alternative : tile_map = [[FindGoldRoom(),GoblinRoom(),None,None,None],\n [None,StartingRoom(),EmptyCave(),EmptyCave(),None]] \"\"\"", "def read_gro(filename):\n top = Topology()\n\n with open(filename, \"r\") as gro_file:\n top.name = str(gro_file.readline().strip())\n n_atoms = int(gro_file.readline())\n coords = u.nm * np.zeros(shape=(n_atoms, 3))\n for row, _ in enumerate(coords):\n line = gro_file.readline()\n content = line.split()\n if not line:\n msg = (\n \"Incorrect number of lines in .gro file. 
Based on the \"\n \"number in the second line of the file, {} rows of\"\n \"atoms were expected, but at least one fewer was found.\"\n )\n raise ValueError(msg.format(n_atoms))\n\n res = content[0]\n atom_name = content[1]\n atom_id = content[2]\n coords[row] = u.nm * np.array(\n [\n float(content[3]),\n float(content[4]),\n float(content[5]),\n ]\n )\n site = Atom(name=atom_name, position=coords[row])\n\n r = re.compile(\"([0-9]+)([a-zA-Z]+)\")\n m = r.match(res)\n site.molecule = (m.group(2), int(m.group(1)))\n site.residue = (m.group(2), int(m.group(1)))\n top.add_site(site, update_types=False)\n top.update_topology()\n\n # Box information\n line = gro_file.readline().split()\n top.box = Box(u.nm * np.array([float(val) for val in line[:3]]))\n\n # Verify we have read the last line by ensuring the next line in blank\n line = gro_file.readline()\n if line:\n msg = (\n \"Incorrect number of lines in input file. Based on the \"\n \"number in the second line of the file, {} rows of atoms \"\n \"were expected, but at least one more was found.\"\n )\n raise ValueError(msg.format(n_atoms))\n\n return top", "def New(*args, **kargs):\n obj = itkTranslationTransformD2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def spatial(self):", "def __init__(self, entityBlocks):\n Template.__init__(self, entityBlocks)", "def _load_molecule(self):\n self.pymol = pybel.readstring(self.input_format, self.file_dic['input'])", "def __init__(self, data, (x,y)):\n\t\tGameImage.__init__(self, data)\n\t\tself.coords = (x,y)", "def import_gpos( self ):\n if not path.isfile(self.from_file):\n # no file there\n self.gpos_file = array([], 'd')\n return\n import xml.dom.minidom\n doc = xml.dom.minidom.parse(self.from_file)\n names = []\n xyz = []\n for el in doc.getElementsByTagName('pos'):\n names.append(el.getAttribute('subgrid'))\n xyz.append(list(map(lambda a : float(el.getAttribute(a)), 'xyz')))\n self.gpos_file = array(xyz, 'd').swapaxes(0, 1)\n self.subgrids = array(names)", "def test_read_from_raster_file(cleantopo_br):\n with mapchete.open(cleantopo_br.dict) as mp:\n tile = mp.config.process_pyramid.tile(5, 0, 0)\n user_process = mapchete.MapcheteProcess(\n tile=tile,\n params=mp.config.params_at_zoom(tile.zoom),\n input=mp.config.get_inputs_for_tile(tile),\n )\n with user_process.open(\"file1\") as f:\n assert f.read().shape == f.read([1]).shape == (1, *f.read(1).shape)", "def generate_tile(self, tms_x, tms_y, tms_z, arguments):\n pass", "def loadWorld(self, filename):\n worldFile = open(filename, 'r');\n\n for line in worldFile:\n info = line.split(' ');\n if(info[0]==\"WIDTH\"):\n self.mWidth = int(info[1]);\n elif info[0] == \"HEIGHT\":\n self.mHeight = int(info[1]);\n elif info[0] == \"SPACE\":\n if info[1] == \"rect\":\n self.mSpaces += [Rect( int(info[2]), int(info[3]),int(info[4]),int(info[5]) )];\n elif info[1] == \"circle\":\n self.mSpaces += [Rect( int(info[2]), int(info[3]),int(info[4]) )];\n elif info[0] == \"OBSTACLE\":\n if info[1] == \"rect\":\n self.mObstMgr.addObstacle( Rect( int(info[2]), int(info[3]),int(info[4]),int(info[5]) ));\n pass\n elif info[1] == \"circle\":\n self.mObstMgr.addObstacle( Circle( int(info[2]), int(info[3]),int(info[4]) ));\n pass\n pass", "def load_objects(self, tmx):\n self.objects = {}\n obj_layer = [l for l in tmx.layers if l.name == 'Objects'][0]\n for x, y, (imgpath, *_) in obj_layer.tiles():\n self.objects[x, y] = self.images[imgpath]\n\n # bit 0 blocks sight, 1 blocks movement so value 3 bloks both\n self.grid.cells[x, 
y] = 3 if imgpath not in NOT_OBSTRUCTIONS else 0", "def load_grid(self, elem_file, elec_file):\n self.load_elem_file(elem_file)\n self.load_elec_file(elec_file)" ]
[ "0.61214197", "0.57492346", "0.5670216", "0.5607972", "0.5434534", "0.5393163", "0.5368674", "0.53446394", "0.53130543", "0.53116614", "0.52955234", "0.5252723", "0.52483314", "0.5248229", "0.5230815", "0.5224649", "0.5208221", "0.52067053", "0.51932734", "0.51882654", "0.51314366", "0.5123428", "0.5123428", "0.5117131", "0.51069885", "0.51066107", "0.5102621", "0.5102161", "0.50950056", "0.5070932", "0.5045702", "0.50363356", "0.50284946", "0.50228804", "0.50102806", "0.5008902", "0.5008518", "0.50072145", "0.5005856", "0.49991873", "0.49903655", "0.49863797", "0.49853808", "0.49853697", "0.49823207", "0.49795383", "0.4978927", "0.4978357", "0.49725658", "0.496631", "0.4965083", "0.496291", "0.49628562", "0.4961782", "0.49580118", "0.49562812", "0.49554476", "0.49530298", "0.49503487", "0.4946854", "0.49446", "0.49404475", "0.49287257", "0.49286082", "0.4928172", "0.4925488", "0.49233043", "0.4922286", "0.49222583", "0.49172118", "0.49114585", "0.49101838", "0.4898147", "0.4894125", "0.48894694", "0.48872682", "0.48865557", "0.48809358", "0.48806125", "0.48725134", "0.4864588", "0.4859786", "0.4857463", "0.48558688", "0.48519295", "0.48491916", "0.48484117", "0.48483923", "0.48471597", "0.48462868", "0.4846236", "0.48439348", "0.48435867", "0.482837", "0.4827518", "0.48255283", "0.4825237", "0.4822672", "0.48145297", "0.48134652" ]
0.54654664
4
Get depiction done using solely the default RDKit functionality.
def _get_2D_by_rdkit(self, mol):
    try:
        rdCoordGen.AddCoords(mol, self.coordgen_params)
        flaws = DepictionValidator(mol).depiction_score()
        return DepictionResult(
            source=DepictionSource.RDKit, template_name=None, mol=mol, score=flaws
        )
    except Exception:
        return DepictionResult(
            source=DepictionSource.Failed, template_name=None, mol=None, score=1000
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_rd(self):\n return self.__rd", "def dl():\n raise NotImplementedError()", "def d(self):\n pass", "def d(self):\n pass", "def DM(self):", "def rd_dr(self, rd):\n # add bits for the bypassed devices\n tdi = bits.bits(rd.n + self.ndevs_before + self.ndevs_after)\n self.driver.scan_dr(tdi, rd)\n # strip bits from the bypassed devices\n rd.drop_msb(self.ndevs_after)\n rd.drop_lsb(self.ndevs_before)", "def test_dvidir(self):\n self.chck_triple('dvidir')", "def ddd():\n return get_data(db, MyTable)", "def softDeformer():\n softDeformerUI()", "def cmd_dele(args):", "def getDmd(self):\n for obj in aq_chain(self):\n if getattr(obj, 'id', None) == 'dmd': return obj", "def rd(self):\n return self._rd", "def digitalRead(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _get_source_rd(self):\n return self.__source_rd", "def degibber(self):", "def dataproduct() -> None:\n pass", "def getDr(d, IO, type_):\n # Define radial distance range\n xp = np.linspace(0, d, d*50)\n yp = 0\n\n # Compute distortion corrections\n x0 = IO[\"x0\"]\n y0 = IO[\"y0\"]\n xbar = xp - x0\n ybar = yp - y0\n r = np.hypot(xbar, ybar)\n\n if type_ == \"symmetric\":\n k1 = IO[\"k1\"]\n k2 = IO[\"k2\"]\n k3 = IO[\"k3\"]\n dx = xbar * (r**2 * k1 + r**4 * k2 + r**6 * k3)\n dy = ybar * (r**2 * k1 + r**4 * k2 + r**6 * k3)\n elif type_ == \"decentering\":\n p1 = IO[\"p1\"]\n p2 = IO[\"p2\"]\n dx = (p1 * (r**2 + 2 * xbar**2) + 2 * p2 * xbar * ybar)\n dy = (2 * p1 * xbar * ybar + p2 * (r**2 + 2 * ybar**2))\n\n dr = np.hypot(dx, dy)\n\n return xp, dr", "def dk_rethinkdb(request):\n return _dkrethingdb(request)", "def applyDemapping(self):\n pass", "def configure_dmd(self):\n raise NotImplementedError", "def __deref__(self):\n return _spacegrant_swig.hdlc_deframer_sptr___deref__(self)", "def performDeletions(self):\n return _libsbml.Submodel_performDeletions(self)", "def LDLT(self):\n\t\tpass", "def get_deposition(self, id: uplink.Path):\n pass", "def m_get_DIDDocument(DID):\n\n DID, name, DIDDocument, active = resolver.resolveDID(DID)\n\n print(f\"Name: : {name}\")\n print(json.dumps(DIDDocument, ensure_ascii=False, indent=3))", "def getDefaultData(dmd):", "def downlinker(self):\n return self.__downlinker", "def xkcd():", "def detx(self, det_id, t0set=None, calibration=None):\n url = 'detx/{0}?'.format(det_id) # '?' since it's ignored if no args\n if t0set is not None:\n url += '&t0set=' + t0set\n if calibration is not None:\n url += '&calibrid=' + calibration\n\n detx = self._get_content(url)\n return detx", "def use(self):", "def rref_den(self, *, method='auto', keep_domain=True):\n return _dm_rref_den(self, method=method, keep_domain=keep_domain)", "def disarm(self):\n pass", "def decapod_client(get_decapod_client):\n return get_decapod_client()", "def d_dr(self, r, t):\n return self.d_do(r,t) * do_dr(r,t)", "def add_dut(self):\n pass", "def get():", "def get():", "def XPLMGetDatad_f(inRefcon):\n pass", "def deb(ctx):\n pass", "def retrieve(self):\n self.DB.close_connection()\n self.r1.queryCase = self.case\n self.r1.knn(1)", "def link_dihedra(self, verbose: bool = ...) 
-> None:\n ...", "def default_get(self, cr, uid, fields, context=None): \n \n \n res = super(granted_rights_order, self).default_get(cr, uid, fields, context=context)\n \n employee_obj = self.pool.get('hr.employee')\n department_obj = self.pool.get('hr.department')\n manager = False\n donor_emp_id = []\n \n if uid != 1 :\n\n donor_emp_id = employee_obj.search(cr ,uid, [('user_id' , '=' , uid )])\n deparment_id = employee_obj.browse(cr,uid,donor_emp_id[0]).department_id.id\n \n if donor_emp_id[0] == department_obj.browse(cr,uid,deparment_id).manager_id.id :\n manager = True\n \n \n \n \n \n \n \n \n \n if donor_emp_id :\n res.update({ 'employee_donor': donor_emp_id[0], \n 'department_id' : deparment_id,\n 'is_a_amanger' : manager,\n })\n return res", "def do_d(self, arg):\n self.do_done(arg)", "def get_main_object(tc):\n return Daal(tc)", "def get_DER(self):\n\n self.check()\n if self.DER:\n return self.DER\n if self.POW:\n self.DER = self.POW.derWrite()\n return self.get_DER()\n raise rpki.exceptions.DERObjectConversionError(\"No conversion path to DER available\")", "def get_DER(self):\n\n self.check()\n if self.DER:\n return self.DER\n if self.POW:\n self.DER = self.POW.derWrite()\n return self.get_DER()\n raise rpki.exceptions.DERObjectConversionError(\"No conversion path to DER available\")", "def get_DER(self):\n\n self.check()\n if self.DER:\n return self.DER\n if self.POW:\n self.DER = self.POW.derWrite()\n return self.get_DER()\n raise rpki.exceptions.DERObjectConversionError(\"No conversion path to DER available\")", "def get_DER(self):\n\n self.check()\n if self.DER:\n return self.DER\n if self.POW:\n self.DER = self.POW.derWrite()\n return self.get_DER()\n raise rpki.exceptions.DERObjectConversionError(\"No conversion path to DER available\")", "def test_dlr_dgw_uninstall(self):\n self._common_uninstall_delete(\n 'dlr_id', dlr_dgw.delete,\n {'gateway': {}},\n delete_args=['routingConfig'],\n delete_kwargs={\n 'uri_parameters': {'edgeId': 'dlr_id'}\n }\n )", "def fdr(self):\n # TODO: Implement FDR on .mzid or .mztab (i.e. from pyOpenMS or pyteomics)\n # https://pyteomics.readthedocs.io/en/latest/api/mzid.html\n # did not work for me: mzid.filter()\n pass", "def data_rd(self, rdId):\n\t\ttry:\n\t\t\treturn base.caches.getRD(rdId)\n\t\texcept base.NotFoundError:\n\t\t\treturn None", "def wr_rd_dr(self, wr, rd):\n tdi = bits.bits()\n tdi.append_ones(self.ndevs_before)\n tdi.append(wr)\n tdi.append_ones(self.ndevs_after)\n self.driver.scan_dr(tdi, rd)\n # strip the dr bits from the bypassed devices\n rd.drop_msb(self.ndevs_after)\n rd.drop_lsb(self.ndevs_before)", "def get_kdv(view):\n\n kdv_file_name = '{0}{1}.pickle'.format(env.local_storage, view)\n dict = {}\n\n if os.path.exists(kdv_file_name):\n\n max_file_age = 3\n # file_date_seconds = os.path.getctime(kdv_file_name)\n file_date_seconds = os.path.getmtime(kdv_file_name)\n file_date_datetime = dt.datetime.fromtimestamp(file_date_seconds)\n file_date_limit = dt.datetime.now() - dt.timedelta(days=max_file_age)\n\n if file_date_datetime < file_date_limit:\n ml.log_and_print(\"[info] getkdvelements.py -> get_kdv: Old xKDV. 
Removing file from local storage: {0}\".format(kdv_file_name))\n os.remove(kdv_file_name) # remove before attempting to get new, or else it will always find a file and start getting new again.\n ordered_dict = get_kdv(view)\n mp.pickle_anything(ordered_dict, kdv_file_name)\n else:\n # ml.log_and_print(\"[info] getkdvelements.py -> get_kdv: Getting KDV from local storage: {0}\".format(kdv_file_name))\n ordered_dict = mp.unpickle_anything(kdv_file_name, print_message=False)\n\n else:\n\n filter = 'filter=Langkey%20eq%201'\n\n if 'TripTypeKDV' in view:\n filter = 'filter=LangKey%20eq%201'\n\n url = 'https://api.nve.no/hydrology/regobs/{0}/OData.svc/{1}?${2}&$format=json'.format(env.odata_version, view, filter)\n lang_key = 1\n\n print(\"getkdvelements.py -> get_kdv: Getting KDV from URL: {0}\".format(url))\n kdv = requests.get(url).json()\n\n for a in kdv['d']['results']:\n try:\n sort_order = a['SortOrder']\n is_active = a['IsActive']\n\n if 'AvalCauseKDV' in url and 9 < int(a['ID']) < 26: # this table gets special treatment. Short names are in description and long names are in Name.\n id = int(a['ID'])\n name = a['Description']\n description = a['Name']\n elif 'TripTypeKDV' in view:\n id = int(a['TripTypeTID'])\n name = a['Name']\n description = a['Descr']\n else:\n id = int(a['ID'])\n name = a['Name']\n description = a['Description']\n\n dict[id] = vc.KDVelement(id, sort_order, is_active, name, description, lang_key)\n\n except (RuntimeError, TypeError, NameError):\n pass\n\n ordered_dict = collections.OrderedDict(sorted(dict.items()))\n mp.pickle_anything(ordered_dict, kdv_file_name)\n\n return ordered_dict", "def D(self):\n if not hasattr(self, '_D'):\n self.logger.warning('The differential operator G.D is not '\n 'available, we need to compute it. 
Explicitly '\n 'call G.compute_differential_operator() '\n 'once beforehand to suppress the warning.')\n self.compute_differential_operator()\n return self._D", "def onto(disgenet, edam):\n disgenet = disgenet.replace(' ', '+').replace(\"'\", \"%27\")\n edam = edam.replace(' ', '+').replace(\"'\", \"%27\")\n disid = subprocess.Popen(\n [\"curl -s -k http://127.0.0.1:3030/ds/query -X POST --data \" +\n \"'query=PREFIX+rdf%3A+%3Chttp%3A%2F%2Fwww.w3.org%2F1999%2F02%2F22-rdf-syntax-ns%23%3E%0A\" +\n \"PREFIX+dcterms%3A+%3Chttp%3A%2F%2Fpurl.org%2Fdc%2Fterms%2F%3E%0A\" +\n \"PREFIX+ncit%3A+%3Chttp%3A%2F%2Fncicb.nci.nih.gov%2Fxml%2Fowl%2FEVS%2FThesaurus.owl%23%3E%0A\" +\n \"SELECT+DISTINCT+%0A%09%3Fdisease+%0AFROM+%3Chttp%3A%2F%2Frdf.disgenet.org%3E+%0AWHERE+%7B%0A++\" +\n \"SERVICE+%3Chttp%3A%2F%2Frdf.disgenet.org%2Fsparql%2F%3E+%7B%0A++++\" +\n \"%3Fdisease+rdf%3Atype+ncit%3AC7057+%3B%0A++++%09dcterms%3Atitle+%22\" + disgenet +\n \"%22%40en+.%0A%7D%0A%7D' -H 'Accept: application/sparql-results+json,*/*;q=0.9'\"],\n stdout=subprocess.PIPE, shell=True).communicate()[0]\n edam_id = subprocess.Popen([\"curl -s 'http://www.ebi.ac.uk/ols/api/search?q=\" + edam + \"&ontology=edam' 'Accept: application/json'\"],\n stdout=subprocess.PIPE, shell=True).communicate()[0]\n try:\n jdisease = json.loads(disid)\n umllist = []\n umls = jdisease['results']['bindings'][0]['disease']['value']\n except (IndexError, ValueError):\n umls = \"No disgenet record\"\n try:\n jedam = json.loads(edam_id)\n eid = jedam['response']['docs'][0]['iri']\n except (IndexError, ValueError):\n eid = \"No EDAM record\"\n return umls, eid", "def load_rentedout():", "def get_DER(self):\n\n self.check()\n if self.DER:\n return self.DER\n if self.POW:\n self.DER = self.POW.derWritePrivate()\n return self.get_DER()\n raise rpki.exceptions.DERObjectConversionError(\"No conversion path to DER available\")", "def get_droid(did):\n conn = create_connection(db_location)\n c = conn.cursor()\n c.execute(\"SELECT * FROM droids WHERE droid_uid = \" + did)\n print(\"DEBUG: *****\") \n droid = dict((c.description[i][0], value) for i, value in enumerate(c.fetchone()))\n if __debug__:\n print(droid)\n conn.commit()\n conn.close()\n return (droid)", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def on_rfid(self):\n pass", "def __deref__(self):\n return _TestA_swig.my_qpsk_demod_cb_sptr___deref__(self)", "def dicom_cli():", "def get_DER(self):\n\n self.check()\n if self.DER:\n return self.DER\n if self.POW:\n self.DER = self.POW.derWritePublic()\n return self.get_DER()\n raise rpki.exceptions.DERObjectConversionError(\"No conversion path to DER available\")", "def retrieve(self):\n pass", "def flux_dEdRdP(energy, distance, power):\n\n return ReactorTools.dRdEnu_U235(energy) * ReactorTools.nuFlux(power, distance*100.)", "def main():\n obj = NetAppOntapNtfsSd()\n obj.apply()", "def get_DryDepAndWetDep_ds(wd=None, Specs=None, dates2use=None,\n DDprefix='DryDep_',\n DDVprefix='DryDepVel_',\n WLprefix='WetLossConv_',\n LSprefix='WetLossLS_',\n NewDepPrefix = 'DryAndWetDep_',\n verbose=False, debug=False,\n ):\n # Avogadro constant (Mol^-1)\n AVG = AC.constants('AVG')\n # Species to process to a combined depositional loss\n if isinstance(Specs, type(None)):\n Specs = ['NO2', 'HNO3', 'NIT', 'NITs', 'NIT-all' ]\n Specs += ['NITD{}'.format(i) for i in np.arange(1, 5)]\n\n # Get dry Deposition\n try:\n Dds = AC.get_DryDep_ds(wd=wd, dates2use=dates2use)\n # Add all all-NIT to dataset\n if any([('NIT' in i) for i in Specs]):\n Dds = 
AC.AddChemicalFamily2Dataset(Dds, fam='NIT-all',\n prefix=DDprefix)\n No_DryDep = False\n\n except AssertionError:\n print('WARNING: No dry dep diagnostics found for {}'.format(wd))\n No_DryDep = True\n\n # Get convective scale wet deposition\n try:\n Cds = AC.get_WetLossConv_ds(wd=wd,\n dates2use=dates2use)\n\n # Add all all-NIT to dataset\n if any([('NIT' in i) for i in Specs]):\n Cds = AC.AddChemicalFamily2Dataset(Cds, fam='NIT-all',\n prefix=WLprefix)\n No_C_WetDep = False\n except AssertionError:\n print('WARNING: No ConvWetDep diags. found for {}'.format(wd))\n No_C_WetDep = True\n\n # Get large scale wet deposition\n try:\n LSds = AC.get_WetLossLS_ds(wd=wd, dates2use=dates2use)\n\n # Add all all-NIT to dataset\n if any([('NIT' in i) for i in Specs]):\n LSds = AC.AddChemicalFamily2Dataset(LSds, fam='NIT-all',\n prefix=LSprefix)\n No_LS_WetDep = False\n except AssertionError:\n print('WARNING: No LSWetDep diagnostics found for {}'.format(wd))\n No_LS_WetDep = True\n\n # Use a copy of Dry Dep xr.Dataset as template to add new data too\n try:\n Dds\n ds = Dds.copy()\n except NameError:\n PrtStr = 'WARNING: Dep. output not found (Dry:{}, C Wet:{}, LS Wet:{})'\n print(PrtStr.format(No_DryDep, No_C_WetDep, No_LS_WetDep))\n return\n LongNameStr = 'Total (Dry and Wet) deposition flux of species {}'\n # Loop by requested species\n for Spec in Specs:\n # RMM\n SpecRMM = AC.species_mass(Spec)\n NewVar = '{}{}'.format(NewDepPrefix, Spec)\n long_name = LongNameStr.format(Spec)\n try:\n\n # Get dry dep deposition\n Var = '{}{}'.format(DDprefix, Spec)\n if verbose:\n print(Dds[Var])\n # Convert units to kg/m2/s (from )\n ExpectedUnits = 'molec cm-2 s-1'\n attrs = Dds[Var].attrs\n if (attrs['units'] == ExpectedUnits):\n if verbose:\n PrtStr = \"NOTE: updating '{}' DryDep units for {}\"\n print(PrtStr.format(Spec, wd))\n ds[NewVar] = Dds[Var].copy() / AVG * 1E-4 * SpecRMM\n units = 'kg m-2 s-1'\n else:\n PrtStr = \"WARNING: Expected units of {}. 
Got '{}' for '{}'\"\n print( PrtStr.format(ExpectedUnits, attrs['units'], wd) )\n sys.exit()\n\n # Get LS Wet dep\n Var = '{}{}'.format(LSprefix, Spec)\n try:\n if verbose:\n print(LSds[Var])\n # Save to total dry/wet dep\n ds[NewVar] += LSds[Var].sum(dim='lev') / LSds['AREA']\n units = 'kg m-2 s-1'\n except KeyError:\n PrtStr = \"WARNING: Skiping '{}' ({}) as not in dataset (wd:{})\"\n print(PrtStr.format(Spec, Var, wd))\n\n # Get convective scale wet deposition\n Var = '{}{}'.format(WLprefix, Spec)\n try:\n if verbose:\n print(Cds[Var])\n # Save to total dry/wet dep\n ds[NewVar] += Cds[Var].sum(dim='lev') / Cds['AREA']\n units = 'kg m-2 s-1'\n except KeyError:\n PrtStr = \"WARNING: Skiping '{}' ({}) as not in dataset (wd:{})\"\n print(PrtStr.format(Spec, Var, wd))\n\n # Update units\n attrs['units'] = units\n attrs['long_name'] = long_name\n ds[NewVar].attrs = attrs\n\n except KeyError:\n PrtStr = \"WARNING: Dry and/or Wet dep '{}' not present for: '{}'\"\n print( PrtStr.format(Spec, wd) )\n\n # Select a limited set of variables to return\n vars2rtn = [i for i in ds.data_vars if (DDVprefix not in i )]\n vars2rtn = [i for i in vars2rtn if (DDprefix not in i )]\n ds = ds[vars2rtn]\n\n # Check numbers if in verbose or debug mode\n if (verbose or debug):\n for Spec in Specs:\n NewVar = '{}{}'.format(NewDepPrefix, Spec)\n\n print(NewVar, (ds[NewVar] * ds['AREA']).values.sum() )\n\n # Return total deposition to the dictionary\n return ds[vars2rtn]", "def reckon(self):", "def support(self):", "def _DeRedden(lam,flux,ra,dec,dustmap_path='/Users/vzm83/Softwares/sfddata-master'): \n m = sfdmap.SFDMap(dustmap_path) \n flux_unred = pyasl.unred(lam,flux,m.ebv(ra,dec))\n return flux_unred", "def test_direct_readme_dtool_task_run(default_direct_readme_dtool_task_spec):\n logger = logging.getLogger(__name__)\n\n logger.debug(\"Instantiate DirectReadmeTask with '{}'\".format(\n default_direct_readme_dtool_task_spec))\n\n t = DirectReadmeTask(**default_direct_readme_dtool_task_spec)\n fw_action = t.run_task({})\n logger.debug(\"FWAction:\")\n _log_nested_dict(logger.debug, fw_action.as_dict())\n\n output = fw_action.stored_data['output']\n\n # TODO: dataset creation in test\n expected_respone = {\n \"creation_date\": \"2020-11-08\",\n \"description\": \"testing description\",\n \"expiration_date\": \"2022-11-08\",\n \"funders\": [\n {\n \"code\": \"testing_code\",\n \"organization\": \"testing_organization\",\n \"program\": \"testing_program\"\n }\n ],\n \"owners\": [\n {\n \"email\": \"testing@test.edu\",\n \"name\": \"Testing User\",\n \"orcid\": \"testing_orcid\",\n \"username\": \"testing_user\"\n }\n ],\n \"project\": \"testing project\"\n }\n\n assert compare(output, expected_respone)", "def get() -> None:\n pass", "def landlord_button_deposite_received(self):\n payment_id = False\n acc_pay_form = self.env.ref(\n 'account.view_account_payment_form')\n account_jrnl_obj = self.env['account.journal'].search(\n [('type', '=', 'sale')], limit=1)\n payment_obj = self.env['account.payment']\n payment_method_id = self.env.ref(\n 'account.account_payment_method_manual_in')\n for tenancy_rec in self:\n if tenancy_rec.acc_pay_dep_rec_id and \\\n tenancy_rec.acc_pay_dep_rec_id.id:\n return {\n 'view_type': 'form',\n 'view_id': acc_pay_form.id,\n 'view_mode': 'form',\n 'res_model': 'account.payment',\n 'res_id': tenancy_rec.acc_pay_dep_rec_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': self._context,\n }\n if tenancy_rec.deposit == 0.00:\n raise Warning(_('Please Enter Deposit 
amount.'))\n if tenancy_rec.deposit < 0.00:\n raise Warning(\n _('The deposit amount must be strictly positive.'))\n vals = {\n 'partner_id': tenancy_rec.property_owner_id.parent_id.id,\n 'partner_type': 'customer',\n 'journal_id': account_jrnl_obj.id,\n 'payment_type': 'inbound',\n 'communication': 'Deposit Received',\n 'tenancy_id': tenancy_rec.id,\n 'amount': tenancy_rec.deposit,\n 'property_id': tenancy_rec.property_id.id,\n 'payment_method_id': payment_method_id.id\n }\n payment_id = payment_obj.create(vals)\n return {\n 'view_mode': 'form',\n 'view_id': acc_pay_form.id,\n 'view_type': 'form',\n 'res_id': payment_id and payment_id.id,\n 'res_model': 'account.payment',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'current',\n 'domain': '[]',\n 'context': {\n 'close_after_process': True,\n }\n }", "def perform(self):\n return", "def perform(self):\n return", "def __deref__(self):\n return _spacegrant_swig.G3RUH_descramble_sptr___deref__(self)", "def get_deleter(\n owner, indentation, objs, objs_are_archived, perform_func=None\n):\n if not len(objs):\n return None\n\n def check_success(obj_type, oid):\n if obj_type != util.Type.pkg_wrapper:\n try:\n obj = util.makeAelObject(obj_type, oid)\n if obj:\n ael.poll()\n obj = util.makeAelObject(obj_type, oid)\n\n if obj:\n name = util.getAelName(obj)\n raise Exception('%s still exits' % name)\n except Exception as e:\n if 'entity is deleted' not in str(e):\n raise e\n\n return\n\n helper.assertCompatibleWithPerformer(objs)\n action = helper._WorldProxy.ARCHIVE_DELETE_ACTION if objs_are_archived \\\n else helper._WorldProxy.LIVE_DELETE_ACTION\n obj_type = util.getType(objs[0])\n if obj_type == util.Type.trade:\n params = owner.other_params['gui_params']\n return helper.make_FNewExpirationPerform_performer(\n owner, params, objs, _AelTradesDeleter,\n check_success, action\n )\n elif obj_type == util.Type.ins:\n params = owner.other_params['gui_params']\n return helper.make_FNewExpirationPerform_performer(\n owner, params, objs, _AelInstrumentsDeleter,\n check_success, action\n )\n\n perform_func = perform_func or _delete\n return helper.make_generic_performer(\n owner, objs, util.INDENTATION if indentation == None else indentation,\n _AelGenericDeleter, check_success, perform_func\n )", "def disable_discovery(self):", "def pull(self):", "def patch(self):\n return super(TenderAwardDocumentResource, self).patch()", "def denotation(self, model):\n # return self.atom.extension(cache, state)\n return model.primitive_denotation(self.atom)", "def disk_get(context, disk_id):\n return NotImplemented", "def _Delete(self):\n pass", "def pdb2pka_desolv_backgr(self,residue):\n protein,routines,forcefield,apbs_setup,lig_titgrps = pdb2pka.pre_init(pdbfilename=self.pdbfile,\n ff='parse',\n ligand=None,\n verbose=1)\n mypkaRoutines = pdb2pka.pKaRoutines(protein, routines, forcefield,apbs_setup)\n #\n # Find our group\n #\n sp=residue.split(':')\n chainid=sp[0]\n #if chainid!='':\n # raise Exception,'pKD cannot handle PDB files with ChainIDs!'\n resnum=int(sp[1])\n target=sp[2]\n mypkaRoutines.findTitratableGroups()\n this_pKa=None\n for pKa in mypkaRoutines.pKas:\n print pKa.residue\n print pKa.uniqueid\n print pKa.residue.resSeq,resnum, pKa.residue.chainID,'CID',chainid,pKa.residue.name,target,pKa.pKaGroup.name,target\n #if pKa.residue.resSeq==resnum and pKa.residue.chainID==chainid and pKa.residue.name==target and pKa.pKaGroup.name==target:\n if pKa.residue.resSeq==resnum and pKa.residue.chainID==chainid and pKa.residue.name==target and 
pKa.pKaGroup.name==target:\n this_pKa=pKa\n break\n if not this_pKa:\n raise Exception,'Could not find inserted titratable group'\n mypkaRoutines.calculateBackground(onlypKa=pKa)\n mypkaRoutines.calculateDesolvation(onlypKa=pKa)\n #\n # Get the intrinsic pKa\n #\n pKaGroup=pKa.pKaGroup\n Gtype=pKa.pKaGroup.type\n #\n # We measure intrinsic pKa values against a single reference state\n #\n desolv=[]\n backgr=[]\n import math\n ln10=math.log(10)\n for titration in pKaGroup.DefTitrations:\n #\n # Find an uncharged reference state\n #\n ref_state=mypkaRoutines.neutral_ref_state[pKa][titration]\n all_states=titration.allstates\n all_states.sort()\n for state in all_states:\n if mypkaRoutines.is_charged(pKa,titration,state)==1:\n dpKa_desolv=(pKa.desolvation[state]-pKa.desolvation[ref_state])/ln10\n dpKa_backgr=(pKa.background[state]-pKa.background[ref_state])/ln10\n #\n # Make acid and base modifications\n #\n if Gtype=='base':\n dpKa_desolv=-dpKa_desolv\n dpKa_backgr=-dpKa_backgr\n #\n # Now calculate intrinsic pKa\n #\n backgr.append(dpKa_backgr)\n desolv.append(dpKa_desolv)\n intpKa=titration.modelpKa+dpKa_desolv+dpKa_backgr\n #print 'Energy difference for %s -> %s [reference state] is %5.2f pKa units' %(state,ref_state,intpKa)\n pKa.intrinsic_pKa[state]=intpKa\n else:\n #\n # Neutral states - why do we treat them differently?\n #\n dpKa_desolv=(pKa.desolvation[state]-pKa.desolvation[ref_state])/ln10\n dpKa_backgr=(pKa.background[state]-pKa.background[ref_state])/ln10\n #\n # Make acid and base modifications\n #\n if Gtype=='base':\n dpKa_desolv=-dpKa_desolv\n dpKa_backgr=-dpKa_backgr\n backgr.append(dpKa_backgr)\n desolv.append(dpKa_desolv)\n dpKa=dpKa_desolv+dpKa_backgr\n #print 'Energy difference for %s -> %s [reference state] is %5.2f kT' %(state,ref_state,dpKa)\n pKa.intrinsic_pKa[state]=dpKa\n #print '-----------------'\n #\n # One value is zero, so just get the avg of the rest\n #\n des_sum=0.0\n for val in desolv:\n des_sum=des_sum+val\n des_sum=des_sum/float(len(desolv)-1)\n #\n bac_sum=0.0\n for val in backgr:\n bac_sum=bac_sum+val\n bac_sum=bac_sum/float(len(backgr)-1)\n return {residue:des_sum},{residue:bac_sum}", "def deleterule(self, sourceKey, destKey):\n return self.execute_command(self.DELETERULE_CMD, sourceKey, destKey)", "def plugh():", "def __init__(self, delf_dir):\n self.des_dir = delf_dir", "def delegate(self, delegee_gidfile, caller_keyfile, caller_gidfile):\n # get the gid of the object we are delegating\n object_gid = self.get_gid_object()\n object_hrn = object_gid.get_hrn() \n \n # the hrn of the user who will be delegated to\n delegee_gid = GID(filename=delegee_gidfile)\n delegee_hrn = delegee_gid.get_hrn()\n \n #user_key = Keypair(filename=keyfile)\n #user_hrn = self.get_gid_caller().get_hrn()\n subject_string = \"%s delegated to %s\" % (object_hrn, delegee_hrn)\n dcred = Credential(subject=subject_string)\n dcred.set_gid_caller(delegee_gid)\n dcred.set_gid_object(object_gid)\n dcred.set_parent(self)\n dcred.set_expiration(self.get_expiration())\n dcred.set_privileges(self.get_privileges())\n dcred.get_privileges().delegate_all_privileges(True)\n #dcred.set_issuer_keys(keyfile, delegee_gidfile)\n dcred.set_issuer_keys(caller_keyfile, caller_gidfile)\n dcred.encode()\n dcred.sign()\n\n return dcred", "def test_desert_fodder():\n instance = topo.Desert()\n assert instance.current_fodder() == 0", "def get_data():\n pass", "def delete(self):\n ...", "def target(self):", "def getD(self):\r\n return self.D", "def kb_retract(self, fact_or_rule):\n\n\n\n 
printv(\"Retracting {!r}\", 0, verbose, [fact_or_rule])\n ####################################################\n # Student code goes here\n if fact_or_rule in self.facts:\n ind = self.facts.index(fact_or_rule)\n f_r = self.facts[ind]\n elif fact_or_rule in self.rules:\n ind = self.rules.index(fact_or_rule)\n f_r = self.rules[ind]\n else:\n print(\"Fact/Rule not found???????\")\n return\n\n if isinstance(f_r, Rule) and len(f_r.supported_by) == 0:\n return\n if len(f_r.supported_by) > 0:\n return\n\n self.kb_retract_helper(fact_or_rule)", "def perform(self):\n pass", "def DocenteDetalleAPI(request, pk):\n try:\n docente = Docente.objects.get(pk=pk)\n except Docente.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = DocenteSerializer(docente)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = DocenteSerializer(docente, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n docente.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def download(self,connector,condition):\n c= connector.cursor()\n\n\n snew = buildSelect(self,'DIF',condition)\n# print snew\n c.execute(snew)\n lnames=[]\n for name,val in sorted(self.__dict__.iteritems()):\n lnames.append(name)\n\n vobj=[]\n for row in c:\n# print row\n obj=DIFDef()\n for i in range(len(lnames)):\n obj.__dict__[lnames[i]]=row[i]\n vobj.append(obj)\n\n \n return vobj", "def _get(self):\n return None" ]
[ "0.56659234", "0.5606022", "0.5375053", "0.5375053", "0.53526074", "0.5315714", "0.52649176", "0.5223769", "0.52229035", "0.5210231", "0.51464874", "0.50903726", "0.5070479", "0.5032436", "0.4937754", "0.4928767", "0.4920147", "0.49045533", "0.48884135", "0.48746234", "0.48690954", "0.48394307", "0.47936922", "0.47733063", "0.47563", "0.47383124", "0.472987", "0.47264954", "0.47229353", "0.4721379", "0.4712606", "0.47058654", "0.4701395", "0.46980262", "0.46893847", "0.46852455", "0.46852455", "0.46828443", "0.467269", "0.4663544", "0.46596727", "0.46593925", "0.46506086", "0.464098", "0.4640383", "0.4640383", "0.4640383", "0.4640383", "0.46383372", "0.46340176", "0.46338895", "0.46319342", "0.46306348", "0.46228033", "0.46032742", "0.4602692", "0.4597655", "0.45941725", "0.4591209", "0.4591209", "0.4591209", "0.45806026", "0.45797464", "0.45748568", "0.45726076", "0.4571355", "0.4570224", "0.45659995", "0.45631996", "0.45560503", "0.45538875", "0.4545545", "0.45421457", "0.45354968", "0.45304078", "0.45270118", "0.45270118", "0.45213625", "0.4520812", "0.45099574", "0.4506181", "0.44986132", "0.4494875", "0.4494849", "0.4489775", "0.44818735", "0.44808593", "0.44699934", "0.4468415", "0.44676954", "0.4466424", "0.44599923", "0.44591045", "0.44569775", "0.44569492", "0.44566202", "0.4446001", "0.44439718", "0.44426894", "0.44385925" ]
0.45862034
61
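As context for the record above (query "Get depiction done using solely the default RDKit functionality."): the following is a minimal, self-contained sketch of the same RDKit-only depiction path, i.e. a CoordGen 2D layout followed by rendering. The input SMILES and output filename are illustrative assumptions; DepictionValidator, DepictionResult and self.coordgen_params are helpers belonging to the project the snippet comes from, not part of RDKit itself, so the scoring step is omitted here.

from rdkit import Chem
from rdkit.Chem import Draw, rdCoordGen

# Illustrative ligand (aspirin); any RDKit Mol would do.
mol = Chem.MolFromSmiles("CC(=O)Oc1ccccc1C(=O)O")

# Default CoordGen parameters stand in for the project-specific self.coordgen_params.
params = rdCoordGen.CoordGenParams()
rdCoordGen.AddCoords(mol, params)

# Write the resulting 2D depiction to an SVG file.
Draw.MolToFile(mol, "ligand_2d.svg")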
Depict ligand using Pubchem templates.
def _get_2D_by_pubchem(self, code, mol):
    try:
        template_path = self._get_pubchem_template_path(code)
        if template_path:
            template = self._load_template(template_path)
            if mol.HasSubstructMatch(template):
                AllChem.GenerateDepictionMatching2DStructure(mol, template)
                flaws = DepictionValidator(mol).depiction_score()
                return DepictionResult(
                    source=DepictionSource.PubChem,
                    template_name=code,
                    mol=mol,
                    score=flaws,
                )
    except Exception as e:
        print(str(e), file=sys.stderr)

    return DepictionResult(
        source=DepictionSource.Failed, template_name=None, mol=None, score=1000
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_ligand_template(self, ligand_template: pd.Series) -> oechem.OEMolBase:\n from openeye import oechem\n\n from ..modeling.OEModeling import read_molecules, select_chain, select_altloc, remove_non_protein\n from ..utils import FileDownloader, LocalFileStorage\n\n logging.debug(\"Interpreting structure ...\")\n pdb_path = LocalFileStorage.rcsb_structure_pdb(\n ligand_template[\"structure.pdb_id\"], self.cache_dir\n )\n if not pdb_path.is_file():\n logging.debug(\n f\"Downloading PDB entry {ligand_template['structure.pdb_id']} ...\"\n )\n FileDownloader.rcsb_structure_pdb(ligand_template[\"structure.pdb_id\"], self.cache_dir)\n logging.debug(\"Reading structure ...\")\n ligand_template_structure = read_molecules(pdb_path)[0]\n\n logging.debug(\"Selecting chain ...\")\n ligand_template_structure = select_chain(ligand_template_structure, ligand_template[\"structure.chain\"])\n\n if ligand_template[\"structure.alternate_model\"] != \"-\":\n logging.debug(\"Selecting alternate location ...\")\n try:\n ligand_template_structure = select_altloc(\n ligand_template_structure, ligand_template[\"structure.alternate_model\"]\n )\n except ValueError:\n logging.debug(\n \"Could not find alternate location \"\n f\"{ligand_template['structure.alternate_model']} for PDB entry \"\n f\"{ligand_template['structure.pdb_id']} chain \"\n f\"{ligand_template['structure.chain']}. Continuing without selecting \"\n \"alternate location ...\"\n )\n pass\n\n logging.debug(\"Removing everything but protein, water and ligand of interest ...\")\n ligand_template_structure = remove_non_protein(\n ligand_template_structure, exceptions=[ligand_template[\"ligand.expo_id\"]], remove_water=False\n )\n\n logging.debug(\"Adding hydrogens ...\")\n oechem.OEPlaceHydrogens(ligand_template_structure)\n\n return ligand_template_structure", "def initial_representations():\n cmd.hide('everything', 'all')\n cmd.show('cartoon', 'all')\n cmd.select('ligand', 'resn NFT')\n cmd.deselect()\n cmd.show(\"sticks\", \"ligand\")", "def forest_code(aut,\n\tname='',\n\tdomain='wrt QNB',\n\tinclude_styles = True,\n\thoriz=True,\n\tLTR=True,\n\tstandalone=True,\n\tdraw_revealing=True,\n):\n\tif name.startswith('$') and not name.endswith('$'):\n\t\traise ValueError(\"Arrow names must end with a $ if they begin with a $.\")\n\t\n\t#1. 
Decide which domain to use for plotting.\n\tdomain = handle_domain(domain, aut)\n\trange, intersection = intersection_from_domain(domain, aut)\n\tis_repatt_specialised = partial(is_repatt, intersection=intersection, aut=aut)\n\t\n\t# Number the leaves.\n\tdomain = [ ( w, i, draw_revealing and is_repatt_specialised(w, True) )\n\t\tfor (i, w) in enumerate(domain, start=1)]\n\trange = [ ( w, i, draw_revealing and is_repatt_specialised(w, False))\n\t\tfor (i, w) in enumerate(range, start=1) ]\n\t#Order the range using Higman's words\n\trange.sort()\n\t\n\ttemplate = setup()\n\treturn template.render(\n\t\t#options\n\t\tname = name,\n\t\tdomain = domain,\n\t\trange = range,\n\t\thoriz = horiz,\n\t\tstandalone = standalone,\n\t\tinclude_styles = include_styles,\n\t\twrite_word = partial(write_word, intersection = intersection),\n\t\tLTR = LTR\n\t)", "def _select_ligand_template(\n self,\n klifs_kinase_id: int,\n ligand: oechem.OEMolBase,\n dfg: Union[str or None],\n ac_helix: Union[str or None],\n ) -> pd.Series:\n import pandas as pd\n\n from ..utils import LocalFileStorage\n\n logging.debug(\"Searching kinase information from KLIFS ...\")\n klifs_kinases = pd.read_csv(LocalFileStorage.klifs_kinase_db(self.cache_dir))\n reference_pocket = klifs_kinases[\n klifs_kinases[\"kinase.klifs_id\"] == klifs_kinase_id\n ][\"kinase.pocket\"].iloc[0]\n reference_pocket = reference_pocket.replace(\"_\", \"\")\n\n logging.debug(\"Retrieve kinase structures from KLIFS for ligand template selection ...\")\n structures = self._get_available_ligand_templates()\n\n if dfg:\n logging.debug(f\"Filtering for ligands bound to a kinase in the DFG {dfg} conformation ...\")\n structures = structures[structures[\"structure.dfg\"] == dfg]\n\n if ac_helix:\n logging.debug(f\"Filtering for ligands bound to a kinase in the alpha C helix {dfg} conformation ...\")\n structures = structures[structures[\"structure.ac_helix\"] == ac_helix]\n\n logging.debug(\"Storing SMILES in structures dataframe ...\")\n structures = self._add_smiles_column(structures)\n\n logging.debug(\"Searching for identical co-crystallized ligands ...\")\n identical_ligands = self._get_identical_ligand_indices(ligand, structures[\"smiles\"]) # TODO: Takes surprisingly long\n\n if len(identical_ligands) > 0:\n logging.debug(\"Found identical co-crystallized ligands ...\")\n structures = structures.iloc[identical_ligands]\n logging.debug(\"Searching for matching KLIFS kinase id ...\")\n if (structures[\"kinase.klifs_id\"] == klifs_kinase_id).any():\n logging.debug(\"Found matching KLIFS kinase id ...\")\n structures = structures[structures[\"kinase.klifs_id\"] == klifs_kinase_id]\n else:\n if self.shape_overlay:\n logging.debug(\"Filtering for most similar ligands according to their shape overlay ...\")\n structures = self._filter_for_similar_ligands_3d(ligand, structures)\n else:\n logging.debug(\"Filtering for most similar ligands according to their fingerprints ...\")\n structures = self._filter_for_similar_ligands_2d(ligand, structures)\n\n logging.debug(\"Filtering for most similar kinase pockets ...\")\n structures = self._filter_for_similar_kinase_pockets(reference_pocket, structures)\n\n logging.debug(\"Picking structure with highest KLIFS quality ...\")\n structure_for_ligand = structures.iloc[0]\n\n return structure_for_ligand", "def litchi(args):\n p = OptionParser(litchi.__doc__)\n opts, args, iopts = p.set_image_options(args, figsize=\"9x6\")\n\n if len(args) != 4:\n sys.exit(not p.print_help())\n\n datafile, bedfile, slayout, switch = args\n 
fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n\n Synteny(fig, root, datafile, bedfile, slayout, switch=switch)\n\n # legend showing the orientation of the genes\n draw_gene_legend(root, 0.4, 0.7, 0.82)\n\n # On the left panel, make a species tree\n fc = \"lightslategrey\"\n\n coords = {}\n xs, xp = 0.16, 0.03\n coords[\"lychee\"] = (xs, 0.37)\n coords[\"clementine\"] = (xs, 0.5)\n coords[\"cacao\"] = (xs, 0.6)\n coords[\"strawberry\"] = (xs, 0.7)\n coords[\"grape\"] = (xs, 0.8)\n xs -= xp\n coords[\"Sapindales\"] = join_nodes(root, coords, \"clementine\", \"lychee\", xs)\n xs -= xp\n coords[\"Rosid-II\"] = join_nodes(root, coords, \"cacao\", \"Sapindales\", xs)\n xs -= xp\n coords[\"Rosid\"] = join_nodes(root, coords, \"strawberry\", \"Rosid-II\", xs)\n xs -= xp\n coords[\"crown\"] = join_nodes(root, coords, \"grape\", \"Rosid\", xs, circle=False)\n\n # Names of the internal nodes\n for tag in (\"Rosid\", \"Rosid-II\", \"Sapindales\"):\n nx, ny = coords[tag]\n nx, ny = nx - 0.01, ny - 0.02\n root.text(nx, ny, tag, rotation=90, ha=\"right\", va=\"top\", color=fc)\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n pf = \"litchi\"\n image_name = pf + \".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)", "def prepare_entry(hoverentry, fig, ypos, name_index, ligsname_index, liglname_index, recept_info, pdb_index, peplig_index): \n dynid = dendro_leaves[int((ypos-5)/10)]\n nodyn_id = dynid.replace('dyn','')\n pdbcode = recept_info[dynid][pdb_index]\n simname = recept_info[dynid][name_index]\n peplig = recept_info[dynid][peplig_index]\n ligsname = recept_info[dynid][ligsname_index]\n liglname = recept_info[dynid][liglname_index]\n bgcolor = hoverentry['marker']['color']\n anot_text = \"%s (%s)<b style='display: none'>%s</b>\" % (simname, pdbcode, dynid)\n if (ligsname):\n # If peptide ligand, take long-name of ligand (regular name)\n if (peplig):\n hovertext = str(\"complex with %s (dynID: %s)\" % (liglname, nodyn_id))\n else: # Else take short name (Residue ID)\n hovertext = str(\"complex with %s (dynID: %s)\" % (ligsname, nodyn_id))\n else:\n hovertext = str(\"apoform (dynID: %s)\" % (nodyn_id)) \n\n # Annotation to corresponding simulation\n colorfont = black_or_white(bgcolor)\n annotations.append(define_annotation_list(ypos, bgcolor, anot_text, colorfont, dynid, hovertext))\n\n return(fig, annotations)", "def addPublication():\n preloaded = [\n {\"description\": \"bortaS <b>bIr</b> jablu'DI' reH QaQqu' nay'!\"},\n {\"language\": \"en\"},\n {\"country\": \"usa\"}\n ]\n return render_template(\"addPublication.html\", msg=\"\", preloaded=preloaded)", "def template1(self):\n self.indirectobject(1, 0, \"<<\\n /Type /Catalog\\n /Outlines 2 0 R\\n /Pages 3 0 R\\n>>\")\n self.indirectobject(2, 0, \"<<\\n /Type /Outlines\\n /Count 0\\n>>\")\n self.indirectobject(3, 0, \"<<\\n /Type /Pages\\n /Kids [4 0 R]\\n /Count 1\\n>>\")\n self.indirectobject(4, 0, \"<<\\n /Type /Page\\n /Parent 3 0 R\\n /MediaBox [0 0 612 792]\\n /Contents 5 0 R\\n /Resources <<\\n /ProcSet [/PDF /Text]\\n /Font << /F1 6 0 R >>\\n >>\\n>>\")\n self.indirectobject(6, 0, \"<<\\n /Type /Font\\n /Subtype /Type1\\n /Name /F1\\n /BaseFont /Helvetica\\n /Encoding /MacRomanEncoding\\n>>\")", "def _prov_html(self):\n ret = {\n 'rt_label': self.rt_label,\n 'uri': self.uri,\n 'uri_encoded': self.uri_encoded,\n 'label': self.label,\n 'nid': self.nid,\n 'gat': self.gat,\n 'rs_encoded': self.rs_encoded,\n 'rs_label': self.rs_label,\n 'sa': self.sa,\n 'ea': self.ea\n }\n\n 
prov_data = self._prov_rdf().serialize(format='turtle')\n\n return render_template(\n 'class_report_prov.html',\n report=ret,\n prov_data=prov_data\n )", "def generate_garmin_kml(self, d ):\n return (\"\"\"\n <GroundOverlay>\n <Icon>\n <href>%(image_url)s</href>\n <DrawOrder>%(draw_order)d</DrawOrder>\n </Icon>\n <LatLonBox>\n <north>%(north).14f</north>\n <south>%(south).14f</south>\n <east>%(east).14f</east>\n <west>%(west).14f</west>\n </LatLonBox>\n </GroundOverlay>\"\"\" % d )", "def get_data_mrk():\n return render_template(\"l_markers.html\")", "def buildGSUB(self):\n\t\t# Construct GSUB table bottom-up.\n\t\tli_fi = Ligature()\n\t\tli_fi.LigGlyph = 'f_i'\n\t\tli_fi.Component = ['i']\n\t\tli_fi.CompCount = 2\n\n\t\tliSubst = LigatureSubst()\n\t\tliSubst.ligatures = {'f': li_fi}\n\t\tliSubst.Format = 1\n\t\tliSubst.LookupType = 4\n\n\t\tlookup = Lookup()\n\t\tlookup.LookupType = 4 # Ligature\n\t\tlookup.LookupFlag = 0\n\t\tlookup.SubTable = [liSubst]\n\t\tlookup.SubTableCount = len(lookup.SubTable)\n\n\t\tlookupList = LookupList()\n\t\tlookupList.Lookup = [lookup]\n\t\tlookupList.LookupCount = len(lookupList.Lookup)\n\n\t\tfea = Feature()\n\t\tfea.FeatureParams = None\n\t\tfea.LookupCount = 1\n\t\tfea.LookupListIndex = [0]\n\n\t\tfeaRecord = FeatureRecord()\n\t\tfeaRecord.FeatureTag = 'liga'\n\t\tfeaRecord.Feature = fea\n\n\t\tfeaList = FeatureList()\n\t\tfeaList.FeatureRecord = [feaRecord]\n\t\tfeaList.FeatureCount = len(feaList.FeatureRecord)\n\n\t\tlangSys = LangSys()\n\t\tlangSys.LookupOrder = None\n\t\tlangSys.ReqFeatureIndex = 0xFFFF\n\t\tlangSys.FeatureIndex = [0]\n\t\tlangSys.FeatureCount = len(langSys.FeatureIndex)\n\n\t\tsct = Script()\n\t\tsct.DefaultLangSys = langSys\n\t\tsct.LangSysRecord = []\n\t\tsct.LangSysCount = len(sct.LangSysRecord)\n\n\t\tsctRec = ScriptRecord()\n\t\tsctRec.ScriptTag = 'tag1'\n\t\tsctRec.Script = sct\n\n\t\tsctList = ScriptList()\n\t\tsctList.ScriptRecord = [sctRec]\n\t\tsctList.ScriptCount = len(sctList.ScriptRecord)\n\n\t\tgsub = GSUB()\n\t\tgsub.LookupList = lookupList\n\t\tgsub.FeatureList = feaList\n\t\tgsub.ScriptList = sctList\n\n\t\ttable = ttLib.newTable('GSUB')\n\t\ttable.table = gsub\n\t\treturn table", "def __repr__(self: GtinFormat) -> str:\n return f\"GtinFormat.{self.name}\"", "def prescription(self):\n prescription = \"\\n{0:>10}\\t{1:>10}\\t{2:>10}\\t{3:>10}\\n\".format(\"R\",\"Material\",\"d\",\"diameter\")\n for surface in self.lensSurfaces():\n prescription += \"{0:>10.2f}\\t{1:>10}\\t{2:>10.2f}\\t{3:>10.2f}\\n\".format(surface.R, str(surface.mat), surface.spacing, surface.diameter)\n return prescription", "def lammps_cell_text(structure):\n\n cell_text = f\"\"\"\n0.0 {structure.cell[0, 0]} xlo xhi\n0.0 {structure.cell[1, 1]} ylo yhi\n0.0 {structure.cell[2, 2]} zlo zhi\n{structure.cell[1, 0]} {structure.cell[2, 0]} {structure.cell[2, 1]} xy xz yz\n\"\"\"\n\n return cell_text", "def gremlin(self):\r\n initial = '{} = g.makeType().name(\"{}\").{}{}makeEdgeLabel()'\r\n primary_key = ''\r\n if self.primary_key:\r\n primary_key = \"primaryKey({}).\".format(self.primary_key)\r\n\r\n functional = \"functional().\" if self.functional else \"\"\r\n\r\n return initial.format(self.label, self.label, primary_key, functional)", "def get_gsub_ligature_lookup(font):\n\n # The template might include more lookups after lookup 0, if it has a\n # GSUB table.\n if 'GSUB' not in font:\n ligature_subst = otTables.LigatureSubst()\n ligature_subst.ligatures = {}\n\n lookup = otTables.Lookup()\n lookup.LookupType = 4\n lookup.LookupFlag = 0\n 
lookup.SubTableCount = 1\n lookup.SubTable = [ligature_subst]\n\n font['GSUB'] = add_emoji_gsub.create_simple_gsub([lookup])\n else:\n lookup = font['GSUB'].table.LookupList.Lookup[0]\n assert lookup.LookupFlag == 0\n\n # importXML doesn't fully init GSUB structures, so help it out\n st = lookup.SubTable[0]\n if not hasattr(lookup, 'LookupType'):\n assert st.LookupType == 4\n setattr(lookup, 'LookupType', 4)\n\n if not hasattr(st, 'ligatures'):\n setattr(st, 'ligatures', {})\n\n return lookup", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def generate_example_label(self):\n label_markup = \"\"\"{keywords}procedure{i} {blocks}Foo{i}\n (An_Integer : {keywords}in out{i} {types}Integer{i} := {numbers}0{i};\n A_String : {types}String{i} := {strings}\"some text\"{i})\n {keywords}with{i} {aspects}Pre => An_Integer >= -1{i};\n{comments}-- Documentation for Foo{i}\n\n---------\n-- Foo --\n---------\n\n{keywords}procedure{i} {blocks}Foo{i}\n (An_Integer : {keywords}in out{i} {types}Integer{i} := {numbers}0{i};\n A_String : {types}String{i} := {strings}\"some text\"{i}) {keywords}is{i}\n{keywords}begin{i}\n {comments}-- Do the actual loop{i}\n\n {keywords}for{i} J {keywords}in{i} A_String'Range {keywords}loop{i}\n Put_Line ({strings}\"bla\"{i} &amp; (A + {numbers}10{i})'Img);\n {keywords}end loop{i};\n{keywords}end{i} {blocks}Foo{i};\n\"\"\"\n # Add line numbers\n num = 1\n prefixed = []\n\n # Compute the preview's gutter foreground color by mixing the editor's\n # foreground and background color.\n # This formula needs to be synchronized with the formula that computes\n # the 'gutter_color' in gps.css.\n gutter_fg_color = self.d['editor_fg'].mix(self.d['editor_bg'], 0.6)\n\n for line in label_markup.splitlines():\n prefixed.append(\n '<span color=\"{}\">{:4d} </span> {}'.format(\n gutter_fg_color.to_hex6_string(),\n num,\n line))\n num = num + 1\n\n font = GPS.Preference(\"Src-Editor-Reference-Style\").get().split(\"@\")[0]\n label_markup = '<span font=\"{}\">'.format(font) + '\\n'.join(\n prefixed) + '</span>'\n\n b = Gtk.HBox()\n label = Gtk.Label()\n b.pack_start(label, False, False, 0)\n _, bg = Gdk.Color.parse(self.d['editor_bg'].to_hex6_string())\n _, fg = Gdk.Color.parse(self.d['editor_fg'].to_hex6_string())\n b.modify_bg(Gtk.StateType.NORMAL, bg)\n label.modify_fg(Gtk.StateType.NORMAL, fg)\n process_dict = {'i': \"</span>\"}\n for key in ['keywords', 'blocks', 'comments',\n 'strings', 'numbers', 'aspects', 'types']:\n val = self.d[key]\n process_dict[key] = '<span {} {} {}>'.format(\n 'color=\"{}\"'.format(val[1].to_hex6_string())\n if val[1].a != 0.0 else '',\n 'background=\"{}\"'.format(val[2].to_hex6_string())\n if val[2].a != 0.0 else '',\n\n 'font-weight=\"BOLD\"' if 'BOLD' in val[0] else '' +\n ' font-style=\"ITALIC\"' if \"ITALIC\" in val[0] else '')\n\n label.set_markup(label_markup.format(**process_dict))\n return b", "def generate_lookat_kml_block(self, lng, lat, viewrange):\n return \"\"\"\n <LookAt>\n <longitude>%.14f</longitude>\n <latitude>%.14f</latitude>\n <altitude>0</altitude>\n <range>%.f</range>\n <tilt>0</tilt>\n <heading>0</heading>\n </LookAt>\n\"\"\" % (lng, lat, viewrange)", "def render_knowl_in_template(knowl_content, **kwargs):\n render_me = u\"\"\"\\\n {%% include \"knowl-defs.html\" %%}\n {%% from \"knowl-defs.html\" import KNOWL with context %%}\n {%% from 
\"knowl-defs.html\" import KNOWL_LINK with context %%}\n {%% from \"knowl-defs.html\" import KNOWL_INC with context %%}\n {%% from \"knowl-defs.html\" import TEXT_DATA with context %%}\n\n %(content)s\n \"\"\"\n knowl_content = md_preprocess(knowl_content)\n\n # markdown enabled\n render_me = render_me % {'content': md.convert(knowl_content)}\n # Pass the text on to markdown. Note, backslashes need to be escaped for\n # this, but not for the javascript markdown parser\n try:\n return render_template_string(render_me, **kwargs)\n except Exception as e:\n return \"ERROR in the template: %s. Please edit it to resolve the problem.\" % e", "def tag(self):\n \n tag = super(self.__class__, self).tag();\n tag = als.tag_join(tag, als.stra(self.strain));\n tag = als.tag_join(tag, als.stra(self.dtype));\n tag = als.tag_join(tag, 'w=%s' % als.stra(self.wid)); \n tag = als.tag_join(tag, 's=%s' % als.stra(self.stage));\n #tag = analysis.tag_join(tag, 'l=%s' % analysis.stra(self.label)); \n\n return tag;", "def label_hemispheres( x, template, templateLR, reg_iterations=[200,50,2,0] ):\n reg = ants.registration(\n ants.rank_intensity(x),\n ants.rank_intensity(template),\n 'SyN',\n aff_metric='GC',\n syn_metric='CC',\n syn_sampling=2,\n reg_iterations=reg_iterations,\n random_seed = 1 )\n return( ants.apply_transforms( x, templateLR, reg['fwdtransforms'],\n interpolator='genericLabel') )", "def _generate(self, markup=None):\n raise NotImplementedError", "def tagger():", "def generate(net, z, maxlen=50, im=None, init=None, use_end=True):\n caption = lm_tools.sample(net, z['word_dict'], z['index_dict'], num=maxlen, Im=im, initial=init, use_end=use_end)\n print ' '.join(caption)", "def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'instructions_html': self.instructions,\r\n 'annotation_storage': self.annotation_storage_url,\r\n 'token': retrieve_token(self.user, self.annotation_token_secret),\r\n 'tag': self.instructor_tags,\r\n 'openseadragonjson': self.openseadragonjson,\r\n }\r\n\r\n return self.system.render_template('imageannotation.html', context)", "def label(mi_, ma_):\n\treturn \"caractères Unicode des points de code {} à {}\".format(mi_, ma_)", "def make_lexicon_txt(self):\n raise NotImplementedError", "def _superpose_templates(\n design_unit: oechem.OEDesignUnit,\n ligand_template_structure: oechem.OEMolBase,\n ligand_template: pd.Series,\n chain_id: Union[str, None],\n ) -> Tuple[oechem.OEGraphMol, oechem.OEGraphMol]:\n from openeye import oechem\n\n from ..modeling.OEModeling import (\n superpose_proteins,\n select_chain,\n residue_ids_to_residue_names,\n )\n\n logging.debug(\"Extracting protein and solvent ...\")\n solvated_kinase_domain = oechem.OEGraphMol()\n design_unit.GetComponents(\n solvated_kinase_domain, oechem.OEDesignUnitComponents_Protein | oechem.OEDesignUnitComponents_Solvent\n )\n if chain_id:\n logging.debug(f\"Deleting all chains but {chain_id} ...\")\n solvated_kinase_domain = select_chain(solvated_kinase_domain, chain_id)\n\n logging.debug(\"Retrieving KLIFS kinase pocket residues ...\")\n pocket_residue_ids = [\n int(residue_id) for residue_id in ligand_template[\"structure.pocket_resids\"].split()\n ]\n pocket_residue_names = residue_ids_to_residue_names(\n ligand_template_structure, pocket_residue_ids\n )\n pocket_residues = [\n f\"{residue_name}{residue_id}\"\n for residue_name, residue_id in zip(pocket_residue_names, pocket_residue_ids)\n ]\n logging.debug(f\"Residues for superposition: {pocket_residues}\")\n\n 
logging.debug(\"Superposing structure on kinase domain ...\")\n solvated_kinase_domain = superpose_proteins(\n ligand_template_structure, solvated_kinase_domain, pocket_residues, ligand_template[\"structure.chain\"]\n )\n\n logging.debug(\"Separating solvent from kinase domain ...\")\n kinase_domain, solvent = oechem.OEGraphMol(), oechem.OEGraphMol()\n oechem.OESplitMolComplex(\n oechem.OEGraphMol(), kinase_domain, solvent, oechem.OEGraphMol(), solvated_kinase_domain\n )\n\n # perceive residues to remove artifacts of other design units in the sequence of the protein\n # preserve certain properties to assure correct behavior of the pipeline,\n # e.g. deletion of chains in OEKLIFSKinaseApoFeaturizer._process_kinase_domain method\n preserved_info = (\n oechem.OEPreserveResInfo_ResidueNumber\n | oechem.OEPreserveResInfo_ResidueName\n | oechem.OEPreserveResInfo_AtomName\n | oechem.OEPreserveResInfo_ChainID\n | oechem.OEPreserveResInfo_HetAtom\n | oechem.OEPreserveResInfo_InsertCode\n | oechem.OEPreserveResInfo_AlternateLocation\n )\n oechem.OEPerceiveResidues(kinase_domain, preserved_info)\n oechem.OEPerceiveResidues(solvent, preserved_info)\n\n return kinase_domain, solvent", "def painel():\n return render_template('home/painel.html', title=\"Painel\")", "def create_image_caption_pairs(self):", "def markup_text(self, text):\n for moniker, name in S['names'].items():\n text = text.replace('${0}'.format(moniker.split('_')[1]), name)\n return text", "def recog():\n\n return render_template('recog.html')", "def get_template_tag(self):\n return \"{% dataset \" + self.cleantitle + \" %}\"", "def __repr__(self): # pragma: no cover\r\n if self.latex == self.sans_parens:\r\n latex_repr = u'\"{}\"'.format(self.latex)\r\n else:\r\n latex_repr = u'\"{}\" or \"{}\"'.format(self.latex, self.sans_parens)\r\n\r\n if self.tall:\r\n wrap = u'<[{}]>'\r\n else:\r\n wrap = u'<{}>'\r\n\r\n return wrap.format(latex_repr)", "def addFid(data, Dim=.5, nodName=\"N\", lableName=\"1\", color=\"red\", GlyphType=1):\n\t\txyz = tuple(data)\n\t\ttipFiducial = slicer.mrmlScene.AddNode(slicer.vtkMRMLMarkupsFiducialNode())\n\t\ttipFiducial.SetName(nodName)\n\t\ttipFiducial.AddFiducial(xyz[0], xyz[1], xyz[2])\n\t\ttipFiducial.SetNthFiducialLabel(0, lableName)\n\t\tslicer.mrmlScene.AddNode(tipFiducial)\n\t\ttipFiducial.SetDisplayVisibility(True)\n\t\ttipFiducial.GetDisplayNode().SetGlyphType(GlyphType) # Vertex2D\n\t\ttipFiducial.GetDisplayNode().SetGlyphScale(Dim * 10)\n\t\ttipFiducial.GetDisplayNode().SetTextScale(3)\n\t\ttipFiducial.GetDisplayNode().SetSelectedColor(Helper.myColor(color))\n\t\t'''\tGlyphShapes {\n GlyphTypeInvalid = 0, 1-StarBurst2D, 2-Cross2D, 3-CrossDot2D,\n 4-ThickCross2D, 5-Dash2D, 6-Sphere3D, 7-Vertex2D,\n 8-Circle2D,9-Triangle2D, 10-Square2D, Diamond2D,\n Arrow2D, ThickArrow2D, HookedArrow2D, GlyphType_Last\n }'''", "def showSS(self, show):\n\t\tfrom chimera.Sequence import defHelixColor, defStrandColor\n\t\thelixReg = self.getRegion(\"structure helices\", create=1,\n\t\t\t\tfill=(1.0, 1.0, 0.8), outline=defHelixColor)\n\t\tstrandReg = self.getRegion(\"structure strands\", create=1,\n\t\t\t\tfill=(0.8, 1.0, 0.8), outline=defStrandColor)\n\t\thelixReg.shown = show\n\t\tstrandReg.shown = show\n\t\tif not show:\n\t\t\treturn\n\t\thelixReg.clear(makeCB=False) # callback will happen in\n\t\tstrandReg.clear(makeCB=False) # addBlocks below\n\n\t\tassocSeqs = {}\n\t\thelices = []\n\t\tstrands = []\n\t\tfor aseq in self.seqCanvas.mav.associations.values():\n\t\t\tassocSeqs[aseq] = 1\n\t\tfor aseq in 
assocSeqs.keys():\n\t\t\tinHelix = inStrand = 0\n\t\t\tfor pos in range(len(aseq.ungapped())):\n\t\t\t\tisHelix = isStrand = 0\n\t\t\t\tfor matchMap in aseq.matchMaps.values():\n\t\t\t\t\ttry:\n\t\t\t\t\t\tres = matchMap[pos]\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif res.isHelix:\n\t\t\t\t\t\tisHelix = 1\n\t\t\t\t\telif res.isStrand:\n\t\t\t\t\t\tisStrand = 1\n\t\t\t\tgapped = aseq.ungapped2gapped(pos)\n\t\t\t\tif isHelix:\n\t\t\t\t\tif inHelix:\n\t\t\t\t\t\thelices[-1][-1] = gapped\n\t\t\t\t\telse:\n\t\t\t\t\t\thelices.append([aseq, aseq,\n\t\t\t\t\t\t\t\tgapped, gapped])\n\t\t\t\t\t\tinHelix = 1\n\t\t\t\telse:\n\t\t\t\t\tif inHelix:\n\t\t\t\t\t\tinHelix = 0\n\t\t\t\tif isStrand:\n\t\t\t\t\tif inStrand:\n\t\t\t\t\t\tstrands[-1][-1] = gapped\n\t\t\t\t\telse:\n\t\t\t\t\t\tstrands.append([aseq, aseq,\n\t\t\t\t\t\t\t\tgapped, gapped])\n\t\t\t\t\t\tinStrand = 1\n\t\t\t\telse:\n\t\t\t\t\tif inStrand:\n\t\t\t\t\t\tinStrand = 0\n\t\thelixReg.addBlocks(helices)\n\t\tstrandReg.addBlocks(strands)", "def dftb_geom(name): \n dftb_geom = \"\"\"Geometry = GenFormat {\n <<< \"{{ title }}\"\n }\n \"\"\"\n return Environment().from_string(dftb_geom).render(title=name)", "def display_abstract( id, highlights=[]):\n a = Abstracts[ id ]\n for h in highlights:\n a = re.sub(r'\\b(%s)\\b'%h,'<mark>\\\\1</mark>',a, flags=re.IGNORECASE)\n print ('<blockquote>%s</blockquote>' % a)", "def draw_lagrangian_descriptor(LD, LD_type, grid_parameters, tau, p_value, norm = True, colormap_name='bone', colormap_mode=1):\n if type(grid_parameters) == dict:\n #n-DoF systems\n slice_parameters = grid_parameters['slice_parameters'] # 2n-D grid\n dims_slice = np.array(grid_parameters['dims_slice'])\n slice_axes_labels = np.array(['$x$','$y$','$p_x$','$p_y$'])\n slice_axes_labels = slice_axes_labels[dims_slice==1]\n else:\n #1-DoF systems\n slice_parameters = grid_parameters # 2-D grid\n slice_axes_labels = ['$x$', '$p_x$']\n\n ax1_min, ax1_max, N1 = slice_parameters[0]\n ax2_min, ax2_max, N2 = slice_parameters[1]\n \n if norm:\n LD = LD - np.nanmin(LD) # Scale LD output\n LD = LD / np.nanmax(LD) # Scale LD output\n \n # Plot LDs\n fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(7.5,3), dpi=200)\n \n points_ax1 = np.linspace(ax1_min, ax1_max, N1)\n points_ax2 = np.linspace(ax2_min, ax2_max, N2)\n \n if colormap_mode == 1:\n vmin, vmax = LD.min(), LD.max()\n elif colormap_mode == 2:\n vmin = LD.mean()-LD.std()\n vmax = LD.max()\n \n con0 = ax0.contourf(points_ax1, points_ax2, LD, cmap=colormap_name, vmin=vmin, vmax=vmax, levels=200)\n\n # Customise appearance\n if p_value == 2:\n str_method = 'arclength - '\n elif p_value >= 1:\n str_method = r'p-norm $(p={})$'.format(p_value)\n elif p_value == 0: \n str_method = 'action-based'\n elif p_value < 1:\n str_method = r'LD$_p$ $(p={})$'.format(p_value)\n \n t_final=abs(tau)\n if LD_type == 'forward':\n string_title = r'Forward LD {}, $\\tau={}$'.format(str_method,t_final)\n elif LD_type == 'backward':\n string_title = r'Backward LD {}, $\\tau={}$'.format(str_method,t_final)\n elif LD_type == 'total':\n string_title = r'Total LD {}, $\\tau={}$'.format(str_method,t_final)\n else: \n string_title = ''\n print('Incorrect \"LD_type\". Valid options: forward, backward, total. 
Plot will appear without title')\n \n fig.suptitle(string_title, fontsize=14, y=1.04)\n ax0.set_title('LD values')\n ax0.set_xlabel(slice_axes_labels[0])\n ax0.set_ylabel(slice_axes_labels[1])\n \n ticks_LD = np.linspace(np.nanmin(LD), np.nanmax(LD), 11)\n fig.colorbar(con0, ax=ax0, ticks=ticks_LD, format='%.2f')\n \n gradient_x, gradient_y = np.gradient( LD, 0.05, 0.05)\n gradient_magnitude = np.sqrt(gradient_x**2 + gradient_y**2)\n gradient_magnitude = gradient_magnitude/gradient_magnitude.max()\n \n con1 = ax1.contourf(points_ax1, points_ax2, gradient_magnitude, cmap='Reds', levels=200)\n ax1.set_title('LD gradient magnitude')\n ax1.set_xlabel(slice_axes_labels[0])\n ax1.label_outer()\n \n ticks_gradient = np.linspace(np.nanmin(gradient_magnitude), np.nanmax(gradient_magnitude), 11)\n fig.colorbar(con1, ax=ax1, ticks=ticks_gradient, format='%.2f')\n \n plt.show()", "def visualize(fd, pos_tags=None):\n if pos_tags is not None:\n fd = {t: f for t, f in fd.items() if t.pos in pos_tags}\n color = {pos.tag: color.hex for pos, color in COLOR.items()}\n frequencies = sorted(fd.values())\n font_size = rescale(frequencies, range(75, 351))\n html = '\\n'.join(\n f'''<font\n color=\"{color[t.pos]}\"\n title=\"{t.lemma}/{t.pos} ({f})\"\n style=\"font-size: {font_size(f)}%\"\n >\n {t.lemma}\n </font>''' for t, f in fd.items()\n )\n return html", "def generate_gazettes(self):\n # TODO: generate_gazettes\n pass", "def generate(self, diagram):", "def LLEEmbedding(TurosR=10,Torusr=4,Classes=[3,5,7],nei=[5,10,20], DataSet = {'Turos', 'Digits'}):\n\n S, dig = CreateDS_Torus_Digits(TurosR=TurosR,Torusr=Torusr,Classes=[3,5,7])\n ### ------ LLE ------###\n nei = nei\n\n if 'Turos' in DataSet:\n # Plotting Torus\n fig = plt.figure(figsize=(30, 10))\n for i,j in enumerate(nei):\n Torus_LLE = LLE(S,2,j)\n neighbors = j\n method = 'Torus LLE'\n ax = fig.add_subplot(1, len(nei), i + 1)\n scatter = ax.scatter(Torus_LLE[:, 0], Torus_LLE[:, 1], c=S[:, 0:1], cmap=plt.cm.Spectral)\n # legend = ax.legend(*scatter.legend_elements(), loc=\"lower left\", title=\"Classes\")\n # ax.add_artist(legend)\n # ax.legend()\n ax.set_title('{} with {} Neighbours'.format(method, neighbors))\n # making_plot(Torus_LLE, pallete=S[:, 0:1], neighbors=j, method='Torus LLE') #An option to plot single graphs\n plt.savefig('Torus LLE embbeding for {} neighbour'.format(neighbors))\n\n if 'Digits' in DataSet:\n #Plotting Digits\n for Argclass, Specificcalss in enumerate(dig):\n fig = plt.figure(figsize=(30,10))\n for i,j in enumerate(nei):\n neighbors = j\n Digit_LLE = LLE(Specificcalss[0],2,j)\n method = 'Digit LLE'\n ax = fig.add_subplot(1, len(nei), i + 1)\n scatter = ax.scatter(Digit_LLE[:, 0], Digit_LLE[:, 1], c=Specificcalss[1], cmap=plt.cm.Spectral)\n legend = ax.legend(*scatter.legend_elements(), loc=\"lower left\", title=\"Classes\")\n ax.add_artist(legend)\n ax.legend()\n ax.set_title('{} with {} Neighbours'.format(method, neighbors))\n # making_plot(Digit_isomap, Specificcalss[1], neighbors=j, method='Digit ISOMAP') #An option to plot single graphs\n plt.savefig('Digits up to {} - LLE embbeding for {} neighbour'.format(Classes[Argclass], nei))", "def get_grades_d3():\n return render_template(\"grades_d3.html\")", "def markov_story():\n return render_template(\"markovstory.html\")", "def add_publications(generator):\n if 'PUBLICATIONS_SRC' not in generator.settings:\n return\n try:\n from StringIO import StringIO\n except ImportError:\n from io import StringIO\n try:\n from pybtex.database.input.bibtex import Parser\n from 
pybtex.database.output.bibtex import Writer\n from pybtex.database import BibliographyData, PybtexError\n from pybtex.backends import html\n from pybtex.style.formatting import plain, toplevel\n from pybtex.style.template import (sentence, words,\n optional, optional_field, field, tag)\n from pybtex.richtext import Symbol\n except ImportError:\n logger.warn('`pelican_bibtex` failed to load dependency `pybtex`')\n return\n\n refs_file = generator.settings['PUBLICATIONS_SRC']\n try:\n bibdata_all = Parser().parse_file(refs_file)\n except PybtexError as e:\n logger.warning('`pelican_bibtex` failed to parse file %s: %s' % (\n refs_file,\n str(e)))\n return\n\n class CustomStyle(plain.Style):\n\n def format_bold_title(self, e, which_field, as_sentence=True):\n formatted_title = tag('strong')[field(which_field)]\n if as_sentence:\n return sentence[formatted_title]\n else:\n return formatted_title\n\n def get_inproceedings_template(self, e):\n template = toplevel[\n self.format_bold_title(e, 'title'),\n Symbol('newline'),\n sentence[self.format_names('author')],\n Symbol('newline'),\n words[\n 'In',\n sentence[\n optional[self.format_editor(e, as_sentence=False)],\n self.format_btitle(e, 'booktitle', as_sentence=False),\n self.format_volume_and_series(e, as_sentence=False),\n ],\n self.format_address_organization_publisher_date(e),\n ],\n sentence[optional_field('note')],\n self.format_web_refs(e),\n ]\n return template\n\n def get_article_template(self, e):\n volume_and_pages = first_of[\n # volume and pages, with optional issue number\n optional[\n join[\n field('volume'),\n optional['(', field('number'), ')'],\n ':', pages\n ],\n ],\n # pages only\n words['pages', pages],\n ]\n template = toplevel[\n self.format_bold_title(e, 'title'),\n Symbol('newline'),\n self.format_names('author'),\n Symbol('newline'),\n sentence[\n tag('em')[field('journal')],\n optional[volume_and_pages],\n date],\n sentence[optional_field('note')],\n self.format_web_refs(e),\n ]\n return template\n\n def get_techreport_template(self, e):\n template = toplevel[\n self.format_bold_title(e, 'title'),\n Symbol('newline'),\n sentence[self.format_names('author')],\n Symbol('newline'),\n sentence[\n words[\n first_of[\n optional_field('type'),\n 'Technical Report',\n ],\n optional_field('number'),\n ],\n field('institution'),\n optional_field('address'),\n date,\n ],\n sentence[optional_field('note')],\n self.format_web_refs(e),\n ]\n return template\n\n def format_entry(self, label, entry, bib_data=None):\n return super().format_entry(label, entry, bib_data)\n\n publications = []\n\n # format entries\n my_style = CustomStyle()\n html_backend = html.Backend()\n html_backend.symbols.update({'newline': '<br>'})\n formatted_entries = my_style.format_entries(bibdata_all.entries.values())\n\n for formatted_entry in formatted_entries:\n key = formatted_entry.key\n entry = bibdata_all.entries[key]\n year = entry.fields.get('year')\n # This shouldn't really stay in the field dict\n # but new versions of pybtex don't support pop\n pdf = entry.fields.get('pdf', None)\n slides = entry.fields.get('slides', None)\n poster = entry.fields.get('poster', None)\n entrytype = entry.fields.get('type', None)\n\n # render the bibtex string for the entry\n bib_buf = StringIO()\n bibdata_this = BibliographyData(entries={key: entry})\n Writer().write_stream(bibdata_this, bib_buf)\n text = formatted_entry.text.render(html_backend)\n\n publications.append((key,\n year,\n text,\n bib_buf.getvalue(),\n pdf,\n slides,\n poster,\n entrytype))\n 
publications.sort(key=itemgetter(1), reverse=True)\n\n generator.context['publications'] = publications", "def gt(text, font=DEFAULT_FONT, color=\"magenta\",\n on_color=None, attr=None, width=80,\n justify=\"center\"):\n\n f = Figlet(\n font, width=width,\n justify=justify\n )\n r = f.renderText(text)\n return colored(r, color, on_color, attr)", "def gen_row_for_html(usage_flag, image_link_template, error_info_template, document_text, image_list,\n image_idx, landmark_name, landmark_worlds, error_summary, picture_folder='./pictures', width=200):\n image_name = image_list[image_idx]\n image_basename = image_name.split('/')[0]\n case_info = r'<b>Case nunmber</b>:{0} : {1} , '.format(image_idx, image_name)\n\n labelled_images = [image_basename + '_label_lm{}_axial.png'.format(landmark_name),\n image_basename + '_label_lm{}_coronal.png'.format(landmark_name),\n image_basename + '_label_lm{}_sagittal.png'.format(landmark_name)]\n labelled_point = landmark_worlds[0]\n \n if usage_flag == 1:\n error_info = error_info_template.format(landmark_worlds[0][0],\n landmark_worlds[0][1],\n landmark_worlds[0][2])\n \n elif usage_flag == 2:\n detected_images = [image_basename + '_detection_lm{}_axial.png'.format(landmark_name),\n image_basename + '_detection_lm{}_coronal.png'.format(landmark_name),\n image_basename + '_detection_lm{}_sagittal.png'.format(landmark_name)]\n detected_point = landmark_worlds[1]\n\n assert error_summary is not None\n x_error = error_summary.error_dx[landmark_name][image_idx]\n y_error = error_summary.error_dy[landmark_name][image_idx]\n z_error = error_summary.error_dz[landmark_name][image_idx]\n l2_error = error_summary.error_l2[landmark_name][image_idx]\n type_error = error_summary.error_type[landmark_name][image_idx]\n error_info = error_info_template.format(labelled_point[0],\n labelled_point[1],\n labelled_point[2],\n detected_point[0],\n detected_point[1],\n detected_point[2],\n type_error,\n x_error,\n y_error,\n z_error,\n l2_error)\n else:\n raise ValueError('Unsupported flag type!')\n\n document_text = add_document_text(document_text, case_info)\n document_text = add_document_text(document_text, error_info)\n \n document_text += \"\\n\"\n document_text = add_document_text(document_text, \"<table border=1><tr>\")\n document_text = add_three_images(document_text, image_link_template, picture_folder, labelled_images, width)\n if usage_flag == 2:\n document_text = add_three_images(document_text, image_link_template, picture_folder, detected_images, width)\n document_text += \"\\n\"\n document_text = add_document_text(document_text, r'</tr></table>')\n\n return document_text", "def markup(self):\n return '%s%s%s' % (\n self.options['markup_prefix'],\n self._markup,\n self.options['markup_suffix'],\n )", "def text_to_pango(self):\n def replace(text):\n components = text.split(\"&\")\n out = components[0]\n for item in components[1:]:\n if item.startswith(\"amp;\") \\\n or (not item.startswith(\"amp;\")\n and html.unescape(f'&{item}') != f'&{item}'):\n out += \"&\" + item\n else:\n out += \"&amp;\" + item\n return out\n\n if \"full_text\" in self.output.keys():\n self.output[\"full_text\"] = replace(self.output[\"full_text\"])\n if \"short_text\" in self.output.keys():\n self.output[\"short_text\"] = replace(self.output[\"short_text\"])", "def test_template_output(self):\n g = microformats.models.geo()\n g.latitude = 37.408183\n g.latitude_description = 'N 37° 24.491'\n g.longitude = -122.13855\n g.longitude_description = 'W 122° 08.313'\n g.save()\n hc = 
microformats.models.hCard()\n hc.honorific_prefix = 'Mr'\n hc.given_name = 'Joe'\n hc.additional_name = 'Arthur'\n hc.family_name = 'Blogs'\n hc.honorific_suffix = 'PhD'\n hc.url = 'http://acme.com/'\n hc.email_work = 'joe.blogs@acme.com'\n hc.email_home = 'joeblogs2000@home-isp.com'\n hc.tel_work = '+44(0)1234 567890'\n hc.tel_home = '+44(0)1324 234123'\n hc.street_address = '5445 N. 27th Street'\n hc.extended_address = ''\n hc.locality = 'Milwaukee'\n hc.region = 'WI'\n hc.country_name = 'US'\n hc.postal_code = '53209'\n hc.org = \"Acme Corp.\"\n hc.title = 'Vice President'\n hc.save()\n hcl = microformats.models.hCalendar()\n hcl.summary = 'Important Meeting'\n hcl.location = 'BBC in London'\n hcl.url = 'http://www.bbc.co.uk/'\n hcl.dtstart = datetime.datetime(2009, 4, 11, 13, 30)\n hcl.dtend = datetime.datetime(2009, 4, 11, 15, 30)\n hcl.description = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.'\n hcl.street_address = 'Broadcasting House'\n hcl.extended_address = 'Portland Place'\n hcl.locality = 'London'\n hcl.region = ''\n hcl.country_name = 'GB'\n hcl.postal_code = 'W1A 1AA'\n hcl.save()\n u = User.objects.create_user('john', 'john@smith.com', 'password')\n URL = 'http://twitter.com/ntoll'\n tgt = 'Nicholas Tollervey'\n x = microformats.models.xfn()\n x.source = u\n x.target = tgt \n x.url = URL\n x.save()\n xfnv1 = microformats.models.xfn_values.objects.get(value='friend')\n xfnv2 = microformats.models.xfn_values.objects.get(value='met')\n xfnv3 = microformats.models.xfn_values.objects.get(value='colleague')\n x.relationships.add(xfnv1)\n x.relationships.add(xfnv2)\n x.relationships.add(xfnv3)\n x.save()\n g2 = microformats.models.geo()\n g2.latitude = 45.498677\n g2.latitude_description = \"45°34' 13\"\" N\"\n g2.longitude = -73.570260 \n g2.longitude_description = \"73°29' 55\"\" W\" \n g2.save()\n hc2 = microformats.models.hCard()\n hc2.honorific_prefix = 'Mr'\n hc2.given_name = 'John'\n hc2.additional_name = ''\n hc2.family_name = 'Fletcher'\n hc2.honorific_suffix = 'MA(cantab)'\n hc2.url = 'http://lso.co.uk/'\n hc2.tel_work = '+44(0)1234 567456'\n hc2.street_address = 'The Barbican Centre'\n hc2.extended_address = 'Silk Street'\n hc2.locality = 'London'\n hc2.country_name = 'GB'\n hc2.postal_code = 'EC2Y 8DS'\n hc2.org = 'London Symphony Orchestra'\n hc2.title = 'Principal Tuba Player'\n hc2.save()\n hcl2 = microformats.models.hCalendar()\n hcl2.summary = 'Operation Overlord'\n hcl2.location = 'Normandy, France'\n hcl2.url = 'http://en.wikipedia.org/wiki/Operation_Overlord'\n hcl2.dtstart = datetime.datetime(1944, 6, 6)\n hcl2.dtend = datetime.datetime(1944, 8, 30)\n hcl2.description = 'You are about to embark upon the Great Crusade, toward which we have striven these many months. The eyes of the world are upon you. The hopes and prayers of liberty-loving people everywhere march with you. 
In company with our brave Allies and brothers-in-arms on other Fronts, you will bring about the destruction of the German war machine, the elimination of Nazi tyranny over the oppressed peoples of Europe, and security for ourselves in a free world.'\n hcl2.save()\n listing = microformats.models.hListing()\n listing.listing_action = \"sell\"\n listing.summary = \"Pony requires a good home\"\n listing.description = \"A young pony who answers to the name Django\"\\\n \" requires a new home having outgrown his current host. Easy\"\\\n \" going and fun to play with Django also provides rainbow\"\\\n \" manure that is sure to help the garden grow.\"\n listing.lister_fn = \"John Doe\"\n listing.lister_email = \"john.doe@isp.net\"\n listing.lister_url = \"http://isp.com/django_the_pony\"\n listing.lister_tel = \"+44(0) 1234 567456\"\n listing.dtlisted = datetime.datetime(2009, 5, 6)\n listing.dtexpired = datetime.datetime(2009, 8, 19)\n listing.price = \"£2500 ono\"\n listing.item_fn = \"Django the Pony\"\n listing.item_url = \"http://djangoproject.com/\"\n listing.locality = \"Brighton\"\n listing.country_name = \"GB\"\n listing.save()\n rev1 = microformats.models.hReview()\n rev1.summary=\"Acme's new services rock!\"\n rev1.type='business'\n rev1.description='Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.'\n rev1.rating=4\n rev1.dtreviewed=datetime.datetime(2009,4,10)\n rev1.reviewer='John Smith'\n rev1.fn='Acme Corp'\n rev1.url='http://acme.com'\n rev1.tel='+44(0)1234 567456'\n rev1.street_address = '5445 N. 27th Street'\n rev1.extended_address = ''\n rev1.locality = 'Milwaukee'\n rev1.region = 'WI'\n rev1.country_name = 'US'\n rev1.postal_code = '53209'\n rev1.save()\n rev2 = microformats.models.hReview()\n rev2.summary = 'A phenomenal tuba recital'\n rev2.description = 'Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.'\n rev2.rating=5\n rev2.type='event'\n rev2.reviewer='John Doe'\n rev2.fn='John Fletcher - One man and his Tuba'\n rev2.url='http://www.johnfletcher-tuba.co.uk/'\n rev2.dtstart = datetime.datetime(1987, 10, 3, 19, 30)\n rev2.street_address = 'The Pro Arte Theatre'\n rev2.locality = 'London'\n rev2.save()\n rev3 = microformats.models.hReview()\n rev3.summary = \"Mr Bloggs children's entertainer flops\"\n rev3.description = 'Claritas est etiam processus dynamicus, qui sequitur mutationem consuetudium lectorum. Mirum est notare quam littera gothica, quam nunc putamus parum claram, anteposuerit litterarum formas humanitatis per seacula quarta decima et quinta decima. Eodem modo typi, qui nunc nobis videntur parum clari, fiant sollemnes in futurum.'\n rev3.rating=2\n rev3.type='person'\n rev3.reviewer='Melvyn Bragg'\n rev3.fn='Mr Bloggs'\n rev3.tel='01234 567456'\n rev3.save()\n rev4 = microformats.models.hReview()\n rev4.summary = 'Latest Star-Wars is Sucko-Barfo'\n rev4.description = 'Claritas est etiam processus dynamicus, qui sequitur mutationem consuetudium lectorum. Mirum est notare quam littera gothica, quam nunc putamus parum claram, anteposuerit litterarum formas humanitatis per seacula quarta decima et quinta decima. 
Eodem modo typi, qui nunc nobis videntur parum clari, fiant sollemnes in futurum.'\n rev4.rating=1\n rev4.type='film'\n rev4.reviewer='Barry Norman'\n rev4.fn='Star Wars - Revenge of the Sith'\n rev4.url='http://www.starwars.com/movies/episode-iii/'\n rev4.save()\n rev5 = microformats.models.hReview()\n rev5.rating=1\n rev5.type='film'\n rev5.fn='Star Wars - The Phantom Menace'\n rev5.save()\n feed = microformats.models.hFeed()\n feed.save()\n entry1 = microformats.models.hEntry()\n entry1.hfeed = feed\n entry1.entry_title = 'Entry 1 Title'\n entry1.entry_content = 'Claritas est etiam processus dynamicus, qui sequitur mutationem consuetudium lectorum. Mirum est notare quam littera gothica, quam nunc putamus parum claram, anteposuerit litterarum formas humanitatis per seacula quarta decima et quinta decima. Eodem modo typi, qui nunc nobis videntur parum clari, fiant sollemnes in futurum.'\n entry1.entry_summary = 'Lorem ipsum dolor sit amet doo-dah whatsit thingymajig'\n entry1.author = 'A.N.Other'\n entry1.bookmark = 'http://website.com/entry1'\n entry1.updated = datetime.datetime(2009, 6, 1)\n entry1.save()\n entry2 = microformats.models.hEntry()\n entry2.hfeed = feed\n entry2.entry_title = 'Entry 2 Title'\n entry2.entry_content = 'Claritas est etiam processus dynamicus, qui sequitur mutationem consuetudium lectorum. Mirum est notare quam littera gothica, quam nunc putamus parum claram, anteposuerit litterarum formas humanitatis per seacula quarta decima et quinta decima. Eodem modo typi, qui nunc nobis videntur parum clari, fiant sollemnes in futurum.'\n entry2.entry_summary = 'Lorem ipsum dolor sit amet doo-dah whatsit thingymajig'\n entry2.author = 'Sidney Humphries'\n entry2.bookmark = 'http://website.com/entry2'\n entry2.updated = datetime.datetime(2009, 3, 14)\n entry2.save()\n entry3 = microformats.models.hEntry()\n entry3.hfeed = feed\n entry3.entry_title = 'Entry 3 Title'\n entry3.entry_content = 'Claritas est etiam processus dynamicus, qui sequitur mutationem consuetudium lectorum. Mirum est notare quam littera gothica, quam nunc putamus parum claram, anteposuerit litterarum formas humanitatis per seacula quarta decima et quinta decima. Eodem modo typi, qui nunc nobis videntur parum clari, fiant sollemnes in futurum.'\n entry3.entry_summary = 'Lorem ipsum dolor sit amet doo-dah whatsit thingymajig'\n entry3.author = 'Nicholas Hawkesmoor'\n entry3.bookmark = 'http://website.com/entry3'\n entry3.updated = datetime.datetime(2008, 12, 28)\n entry3.save()\n entry4 = microformats.models.hEntry()\n entry4.entry_title = 'Entry 4 Title'\n entry4.entry_content = 'Claritas est etiam processus dynamicus, qui sequitur mutationem consuetudium lectorum. Mirum est notare quam littera gothica, quam nunc putamus parum claram, anteposuerit litterarum formas humanitatis per seacula quarta decima et quinta decima. Eodem modo typi, qui nunc nobis videntur parum clari, fiant sollemnes in futurum.'\n entry4.entry_summary = 'Lorem ipsum dolor sit amet doo-dah whatsit thingymajig'\n entry4.author = 'Fred Blogs'\n entry4.bookmark = 'http://website.com/entry4'\n entry4.updated = datetime.datetime(2008, 11, 15)\n entry4.save()\n item1 = microformats.models.hNews()\n item1.hfeed = feed\n item1.entry_title = 'L.A. 
Icon Otis Chandler Dies at 78'\n item1.entry_content = 'Otis Chandler, whose vision and determination as publisher of the Los Angeles Times from 1960 to 1980 catapulted the paper from mediocrity into the front ranks of American journalism, died today of a degenerative illness called Lewy body disease. He was 78.'\n item1.entry_summary = 'An obituary of Los Angeles Times Publisher Otis Chandler'\n item1.author = 'David Shaw and Mitchell Landsberg'\n item1.bookmark = 'http://www.latimes.com/news/local/la-me-chandler-obit,0,7195252.story'\n item1.updated = datetime.datetime(2006, 2, 27)\n item1.source_org = 'Los Angeles Times'\n item1.source_url = 'http://www.latimes.com'\n item1.principles_url = 'http://www.latimes.com/news/nationworld/nation/la-0705lat_ethics_code-pdf,0,7257671.acrobat'\n item1.license_url = 'http://www.latimes.com/services/site/lat-terms,0,6713384.htmlstory'\n item1.license_description = 'Terms of service'\n item1.locality = 'Los Angeles'\n item1.country_name = 'US'\n item1.longitude = -118.2666667\n item1.latitude = 34.0444444\n item1.save()\n\n # All the data is defined so lets render the test template...\n template = get_template('test.html')\n data = {\n 'contact': hc,\n 'loc': g,\n 'event': hcl, \n 'listing': listing,\n 'review1': rev1,\n 'review2': rev2,\n 'review3': rev3,\n 'review4': rev4,\n 'review5': rev5,\n 'person': x,\n 'c2': hc2,\n 'loc2': g2,\n 'event2': hcl2,\n 'feed': feed,\n 'entry': entry4,\n 'item': item1,\n }\n context = Context(data)\n import html_test\n path = os.path.dirname(html_test.__file__)\n outfile = codecs.open(os.path.join(path, 'microformat_test.html'), 'w', 'utf-8')\n outfile.write(template.render(context))\n outfile.close()", "def __repr__(self):\n\n return \"\\n Glitz ID: {} Title: {}\\n\".format(self.glitz_id,\n self.title)", "def get_kritis(self, kriti_list):\n self.kritis = [[k.name, k.composer, k.link] for k in kriti_list if \n k.raga == self.name]", "def genLattice(structure,in_network,dim,supercell,prec=1E-4,\n seed_index=0,c_mag=60,y_dist=-1):\n\n # Generate vectors in plane/line, relative to\n # the first atom in the network of atoms\n \n if y_dist==-1:\n y_dist=c_mag/3\n \n new = [x for x in in_network if abs(x[2])<np.pi/2]\n return_structure=False\n mat = np.array(structure.lattice.as_dict()['matrix'])\n coords = np.array([np.dot(mat.T,x.frac_coords%1) for x in structure.sites])\n specs = structure.species\n ref_ele_d = getUniqueCount(specs)\n for i in ref_ele_d:\n ref_ele_d[i]/=(supercell**dim)\n coords = coords-coords[seed_index]\n \n\n\n\n\n for lat_vectors in sorted(new,key=itemgetter(3)):\n\n # Create lattice matrix to fit atomic coordinates against\n # In 2D\n if dim==2:\n new_c = np.cross(lat_vectors[0],lat_vectors[1])\n scale_c = c_mag/magni(new_c)\n\n latt_attempt = np.array([lat_vectors[0],lat_vectors[1],\\\n new_c*scale_c])\n \n # In 1D\n elif dim==1:\n unitV = lat_vectors[0]/magni(lat_vectors[0])\n if unitV[0]==0:\n perp1 = [1,0,0]\n elif unitV[1]==0:\n perp1 = [0,1,0]\n elif unitV[2]==0:\n perp1 = [0,0,1]\n else:\n perp1 = [1,1,-1*(unitV[0]+unitV[1])/unitV[2]]\n perp1 = perp1/np.linalg.norm(perp1)*c_mag\n perp2 = np.cross(unitV,perp1)\n perp2 = perp2/np.linalg.norm(perp2)*c_mag\n latt_attempt = np.array([lat_vectors[0],perp1,perp2])\n \n # Fit atomic sites to new lattice\n temp_fracs = np.linalg.solve(latt_attempt.T,np.array(coords).T)\n \n \n\n # Make list of all fractional positions, ignoring\n # which axis\n new_fracs = list([list(x) for x in temp_fracs.T])\n\n if len([x for x in np.array(new_fracs).T if \n 
np.all([(y>=0 and y<1) for y in np.around(x[:dim],3)]) and\n np.all([(y>=-y_dist/c_mag and y<y_dist/c_mag) for \n y in np.around(x[dim:],3)])])==len(new_fracs[0])/supercell**dim:\n \n fit_fracs=[]\n new_fracs_t = np.around(new_fracs.T,6)\n for i in range(len(new_fracs[0])):\n if np.all([(y>=0 and y<1) for y in np.around(new_fracs_t[i][:dim],3)]) \\\n and np.all([(y>=-y_dist/c_mag and y<y_dist/c_mag) \n for y in np.around(new_fracs_t[i][dim:],3)]):\n fit_fracs.append([new_fracs_t[i],specs[i]])\n fit_fracs = np.array(fit_fracs).T\n new_ele_d = getUniqueCount(fit_fracs[1])\n unequal=False\n for k in new_ele_d:\n if new_ele_d[k]!=ref_ele_d[k]:\n unequal=True\n\n break\n if not unequal:\n\n return_structure=True\n break\n\n\n\n # If match found\n if return_structure:\n return(np.array(latt_attempt),fit_fracs)\n # If no match found\n else:\n return([],[])", "def main_piece(self, parent, um, info=\"Patron de T-shirt\", front=True):\n piece_group = inkex.etree.SubElement(parent, 'g',\n {inkex.addNS('label', 'inkscape'): info,\n 'transform': '' if front else 'matrix(-1,0,0,1,-34.745039,0)'})\n\n # The template main vertexes absolute positions\n neck_drop = um['neck_rear'] if not front else um['neck_front'] if um['neck_front'] > 0 else um['neck']\n vertexes = {\n 'neck': (um['neck'], 0),\n 'neck_drop': (0, neck_drop),\n 'shoulder': (um['shoulder'], um['shoulder_drop']),\n 'chest': (um['chest'], um['hsp_chest']),\n 'waist': (um['waist'], um['hsp_waist']),\n 'hip': (um['hip'], um['hsp_hip'])\n }\n\n # The Template structure reference\n if self.options.grid:\n reference = inkex.etree.SubElement(piece_group, 'g',\n {inkex.addNS('label', 'inkscape'): info + \"_structure\"})\n\n draw_svg_line([(0, 0), (0, um['hsp_hip'])], reference, self.doted_line)\n draw_svg_line([(0, 0), (um['neck'], 0)], reference, self.doted_line)\n draw_svg_line([(um['neck'], 0), (0, um['hsp_hip'])], reference, self.doted_line)\n draw_svg_line([(0, um['shoulder_drop']), (um['shoulder'], 0)], reference, self.doted_line)\n draw_svg_line([(0, um['hsp_chest']), (um['chest'], 0)], reference, self.doted_line)\n draw_svg_line([(0, um['hsp_waist']), (um['waist'], 0)], reference, self.doted_line)\n draw_svg_line([(0, um['hsp_hip']), (um['hip'], 0)], reference, self.doted_line)\n\n for name, vertex in vertexes.items():\n draw_svg_circle(self.getunittouu('4mm'), vertex, reference, self.normal_line)\n\n # Template edge paths\n if self.options.temp:\n\n line_style = self.normal_line if self.options.style == 'print' else self.cut_line\n edge = inkex.etree.SubElement(piece_group, 'g', {inkex.addNS('label', 'inkscape'): info + \"_edge\"})\n\n # Building the path string description 'd'\n path = [['m', vertexes['neck']]]\n path.append(Patron.neckline(um, neck_drop))\n path.append(['l', [0, um['hsp_hip'] - neck_drop]])\n path.append(['l', [um['hip'], 0]])\n path.append(Patron.waist_curve(um))\n path.append(self.sleeve_curve(um))\n path.append(['Z', []])\n\n sewing_attribs = {\n 'style': simplestyle.formatStyle(self.normal_line),\n inkex.addNS('label', 'inkscape'): info + '_sewing',\n 'd': formatPath(path)}\n inkex.etree.SubElement(edge, inkex.addNS('path', 'svg'), sewing_attribs)\n\n path[2][1] = [0, um['hsp_hip'] + self.getunittouu('1.5cm') - neck_drop]\n path[3][1] = [um['hip'], 0, 0, -self.getunittouu('1.5cm')]\n offset_attribs = {'style': simplestyle.formatStyle(line_style),\n inkex.addNS('type', 'sodipodi'): 'inkscape:offset',\n inkex.addNS('radius', 'inkscape'): str(self.getunittouu('1cm')),\n inkex.addNS('original', 'inkscape'): 
formatPath(path)\n }\n inkex.etree.SubElement(edge, inkex.addNS('path', 'svg'), offset_attribs)", "def _repr_latex_(self, name=None, dist=None):\n return None", "def tikzcode(self):\n tex = \"\"\n tex += r\"\\draw\"\n if len(self.options):\n options = ', '.join(self.options)\n tex += \"[{options}] \".format(options=options)\n tex += \"({a.xpos:.4f},{a.ypos:.4f}) \".format(a=self.node_a)\n tex += \"to\"\n # if the nodes are arranged, then they have angle in/out\n inout = []\n inout.append('out={angle!s}'.format(angle=self.node_a.angle_inout))\n inout.append('in={angle!s}'.format(angle=self.node_b.angle_inout))\n if inout:\n tex += \"[\" + \", \".join(inout) + \"] \"\n tex += \"({b.xpos:.4f},{b.ypos:.4f})\".format(b=self.node_b)\n tex += \";\\n\"\n return tex", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.html = True", "def machinelearn2():\n return render_template('frontml.html')", "def format_stub_annotation(frag):\n stack = []\n base = ['Hex', 'HexNAc']\n for k in sorted(frag.glycosylation, key=lambda x: x.mass(), reverse=True):\n if k not in base:\n base.append(k)\n for k in base:\n v = frag.glycosylation[k]\n if not v:\n continue\n stack.append(f\" {monosaccharide_to_symbol[k]}{v}\")\n stack.append(\"Pep\")\n return '\\n'.join(stack)", "def _add_template(self):\n template_dir = os.path.join(self.label_path, 'standard',\n 'MNI152_T1_2mm_brain.nii.gz')\n template_name = QFileDialog.getOpenFileName(\n self,\n 'Open standard file',\n template_dir,\n 'Nifti files (*.nii.gz *.nii)')\n if not template_name.isEmpty():\n template_path = str(template_name)\n self._add_img(template_path)", "def rep_legtags(text, footnotes):\n textstring = text\n tagsintext = []\n taggedtextlist = []\n tagtextpat = re.compile(r'\\[/[a-z]\\]')\n tagtextitir = tagtextpat.finditer(textstring)\n for tagfound in tagtextitir:\n closetag = tagfound.group()\n opentag = \"[\" + closetag[2:]\n tag = opentag[1:-1]\n tagsintext.append(tag)\n tagtextlist = get_tagtext(textstring, tag)\n for taggedtext in tagtextlist:\n tagstring = opentag + taggedtext + closetag\n taggedtextlist.append(tagstring)\n for tag in tagsintext:\n tagplace = tagsintext.index(tag)\n replacetext = taggedtextlist[tagplace]\n for footnote in footnotes:\n if footnote[:2] == tag + \" \":\n if \"[/LRep]\" in footnote:\n replacementlist = get_tagtext(footnote, \"LRep\")\n repstring = \"[LRep]\" + replacementlist[0] + \"[/LRep]\"\n textstringlist = textstring.split(replacetext)\n textstring = repstring.join(textstringlist)\n return textstring", "def label_paragraphs(root_el, fastcase_data):\n # case metadata\n citations = [alphanum_lower(\" \".join((c[\"Volume\"], c[\"Reporter\"], c[\"Page\"]) + ((c[\"Suffix\"],) if \"Suffix\" in c else ()))) for c in fastcase_data['Citations']]\n name_clean = alphanum_lower(fastcase_data['PartyHeader']) if fastcase_data['PartyHeader'] else None\n court_clean = alphanum_lower(fastcase_data['CourtName'] or fastcase_data['CourtAbbreviation'])\n docket_numbers_clean = [alphanum_lower(d) for d in fastcase_data['DocketNumbers']]\n\n # via https://github.com/harvard-lil/CaselawAccessProjectSchemas/blob/master/casebodyxml/v1/casebodyxml.xsd\n states = {k:i for i, k in enumerate([None, \"citation\", \"parties\", \"docketnumber\", \"court\", \"otherdate\", \"decisiondate\", \"history\", \"syllabus\", \"attorneys\", \"judges\", \"disposition\", \"_opinionstart\", \"_preauthor\", \"author\", \"opinion\"])}\n reverse_states = {v:k for k, v in states.items()}\n\n state = 0\n header_els = []\n opinions = [[]]\n 
header_complete = False\n extra_els = []\n blank_els = []\n authors = []\n opinion_starts = []\n paragraph_id = 1\n\n def shift_to_opinion(i):\n \"\"\"Move i elements from the end of header to the start of opinion.\"\"\"\n if not i:\n return\n nonlocal header_els\n opinions[0][0:0] = header_els[-i:]\n header_els = header_els[:-i]\n\n def add_el(el, state, target_list=header_els):\n nonlocal blank_els, paragraph_id\n if state:\n if not reverse_states[state].startswith('_'):\n el.attrib['class'] = reverse_states[state]\n if state == states['_opinionstart']:\n opinion_starts.append((len(target_list), el))\n elif state == states['author']:\n authors.append((len(target_list), el))\n blank_els = []\n else:\n blank_els.append(el)\n el.attrib['id'] = f'p-{paragraph_id}'\n paragraph_id += 1\n target_list.append(el)\n\n def append_to_previous(line):\n PyQuery(header_els[-1]).append(PyQuery(line))\n\n for el_pq in PyQuery(root_el)('root').children().items():\n\n if extra_els:\n extra_els.append(el_pq)\n el_pq = extra_els.pop(0)\n\n el = el_pq[0]\n\n # mark the end of the labeled front matter (which may or may not align with actual end)\n if el.tag == 'header-end':\n header_complete = True\n if state == states[\"author\"]:\n state = states[\"opinion\"]\n continue\n\n # skip\n if el.text == \"COPYRIGHT MATERIAL OMITTED\":\n continue\n\n # add linebreak after element for indentation\n if not (el.tail and el.tail.startswith('\\n')):\n el.tail = '\\n' + (el.tail or '')\n\n line = inner_html(el)\n line_text = strip_tags(line)\n line_text_lower = line_text.lower()\n line_alphanum_chars = alphanum_lower(line_text)\n\n # if we've had 5 regular paragraphs in a row, assume we missed the start of the opinion\n if state < states[\"opinion\"] and len(blank_els) >= 5:\n shift_to_opinion(len(blank_els))\n state = states[\"opinion\"]\n\n # we have now reached the opinion and no longer have to process header lines\n if state >= states[\"opinion\"]:\n # check short lines for the start of a concurrence or dissent\n m = new_opinion_re.match(line_text)\n if m:\n el.attrib['class'] = 'author'\n el.attrib['opinion-type'] = opinion_type_lookup[m[1].lower()]\n opinions.append([])\n\n add_el(el, 0, opinions[-1])\n continue\n\n # citation\n if state <= states[\"citation\"]:\n if any(c in line_alphanum_chars for c in citations) or all(citation_like_re.match(s) for s in line.split('<br>')):\n state = states[\"citation\"]\n continue # don't include citation lines in output\n\n # parties\n if state < states[\"parties\"]:\n # special case -- if the case doesn't have a name, like NE2d/939/939ne2d586.xml,\n # assume that whatever comes after the last citation is the name\n if name_clean is None or line_alphanum_chars == name_clean:\n state = states[\"parties\"]\n add_el(el, state)\n elif header_els and name_clean == alphanum_lower(inner_html(header_els[-1]) + line):\n # handle edge case where name is split across two paragraphs\n append_to_previous(line)\n elif line_alphanum_chars.startswith(name_clean) or similar_strings(line_text, fastcase_data['PartyHeader']):\n # special cases -- NW2d/881/881 N.W.2d 813-4_Replace.xml, NW2d/792/792NW2d203.xml\n state = states[\"parties\"]\n add_el(el, state)\n else:\n # if we haven't found a valid name yet, paragraphs are just regular paragraphs\n add_el(el, 0)\n continue\n\n # docket numbers or court\n if state < states[\"court\"]:\n # detect 'Supreme Judicial Court of Massachusetts.' and 'United States Bankruptcy Appellate Panel of the Ninth Circuit.' 
as a court, but not\n # 'Court of Appeals Case No. 04A03-1707-IF-1724' or 'Consol. Court No. 16-00054'\n # line may be 'Court of Appeals of Virginia, Chesapeake.' if court is 'Court of Appeals of Virginia'\n # line may be 'North Carolina Court of Appeals.' if court is 'Court of Appeals of North Carolina'\n # if 'court' in line.lower() or 'panel' in line.lower()) and ('No.' not in line or 'Division No.' in line):\n if any(line_alphanum_chars.startswith(s) for s in docket_numbers_clean):\n state = states[\"docketnumber\"]\n elif line_alphanum_chars.startswith(court_clean) or (\n (line_text.endswith('Court of Appeals.') or any(line_text_lower.startswith(s) for s in ('court of appeal', 'supreme court')))\n ):\n state = states[\"court\"]\n else:\n state = states[\"docketnumber\"]\n add_el(el, state)\n continue\n\n # accidental start of opinion included in head matter\n # NW2d/737/737NW2d768_3New.xml -- \"On order of the Court ...\"\n if state >= states[\"decisiondate\"]:\n if line_text.startswith(\"On order of the Court\"):\n state = states[\"opinion\"]\n add_el(el, 0, opinions[-1])\n continue\n\n # dates\n # 'DATED at Olympia, Washington, this 31st day of October, 2018.'\n # '01-04-2017'\n if state <= states[\"decisiondate\"]:\n # long line isn't decision date -- SCt/134/134sct985_2.xml\n if len(line_text) < 80 and (date_re.search(line_text) or line_text_lower.startswith('dated at') or re.match(r'\\d{1,2}-\\d{2}-\\d{4}$', line_text)):\n if any(line_text.startswith(s) for s in ('Released', 'Submitted', 'Dissenting')) and 'Decided' not in line_text:\n # handle case like\n # 'Submitted June 5, 2007, at Lansing.'\n # 'Decided June 12, 2007, at 9:05 a.m.'\n # 'Released for Publication October 11, 2007\n # 'Dissenting Opinion of Chief Justice Maynard June 27, 2008.'\n # avoid\n # 'Submitted March 2, 2010.<br>Decided April 2, 2010.'\n state = states[\"otherdate\"]\n else:\n state = states[\"decisiondate\"]\n add_el(el, state)\n continue\n\n if state < states[\"judges\"]:\n # strip off judges lines appended to current line, and add as an extra_el\n # \"for Respondent.<strong>Justice BEATTY.</strong></p>\" SE2d/708/708se2d750.xml\n # \"... West Virginia Insurance Federation.<strong>DAVIS, Justice:</strong></p>\" SE2d/719/719se2d830.xml\n # \"for appellees.<strong>Present: HUMPHREYS, McCLANAHAN and BEALES, JJ.</strong><strong>BEALES, Judge.</strong>\" SE2d/708/708se2d429.xml\n while True:\n m = re.search('(.+)(<strong>([^<]+)</strong>)$', line)\n if m and is_judges_or_author(m[3]):\n extra_els.insert(0, PyQuery('<p>'+m[2]+'</p>'))\n line = m[1]\n el_pq.html(line)\n line_text = strip_tags(line)\n line_alphanum_chars = alphanum_lower(line_text)\n continue\n break\n\n # history\n # 'Appeal by defendant from judgment entered 8 December 2004 by Judge Robert H. Hobgood in Alamance County Superior Court. Heard in the Court of Appeals 2 November 2005.'\n if line_text_lower.startswith('appeal') or any(s in line_text for s in ('Superior Court', 'District Court', 'Circuit Court')):\n state = states[\"history\"]\n add_el(el, state)\n continue\n\n # syllabus\n if 'Syllabus by the Court' in line_text or (state == states[\"syllabus\"] and re.match(r'\\d+\\.|[a-z\\[]', line_text)):\n if re.match(r'[a-z\\[]', line_text):\n # handle case where syllabus is split midsentence\n append_to_previous(line)\n else:\n state = states[\"syllabus\"]\n add_el(el, state)\n continue\n\n # attorneys\n # 'Garrett D. Blanchfield, Jr., Reinhardt Wendorf & Blanchfield, St. 
Paul, MN, for Appellants.'\n if any(line_text.startswith(s) for s in (\"An amicus\", \"For the\", \"On behalf of\")) or any(s in line_text for s in (' for ', 'amici curiae', 'pro se')):\n state = states[\"attorneys\"]\n add_el(el, state)\n continue\n\n # titles that mark the start of an opinion, like \"OPINION\"\n if line_alphanum_chars in opinion_start_lines or any(line_alphanum_chars.startswith(s) for s in opinion_start_line_prefixes):\n state = states[\"_opinionstart\"]\n if line_text != \"OPINION\":\n add_el(el, state)\n continue\n\n # Handle paragraph that is definitely followed by author, like \"The opinion of the court was delivered by\", A3d/148/148 A.3d 441_Replace.xml\n if line_text == \"The opinion of the court was delivered by\":\n state = states[\"_preauthor\"]\n add_el(el, 0)\n continue\n if state == states[\"_preauthor\"]:\n add_el(el, states[\"author\"])\n state = states[\"opinion\"]\n continue\n\n # author\n # note, in theory fastcase_data[\"Author\"] could be useful for identifying author paragraph, but it's often not set,\n # and when it is it can also appear in the judges line and other places ...\n judges_or_author = is_judges_or_author(line_text)\n if judges_or_author == \"judges\":\n state = states[\"judges\"]\n add_el(el, state)\n continue\n elif judges_or_author == \"author\":\n add_el(el, states[\"author\"])\n state = states[\"opinion\"] if header_complete else states[\"author\"]\n continue\n\n # weird special case where there's an order provided before the start of the opinion\n # E.g. NW2d/740/740NW2d659_1.xml, 'ORDER ENTERED JUNE 8, 2007' and subsequent unlabeled lines\n if line_text.startswith(\"ORDER ENTERED\") or state == states[\"disposition\"]:\n state = states[\"disposition\"]\n add_el(el, state)\n continue\n\n # regular paragraph\n add_el(el, 0)\n continue\n\n # fixups\n labels = [el.attrib.get('class') for el in header_els]\n # rewrite special case like NE2d/944/944ne2d1119.xml:\n # [['parties', '...'],\n # ['docketnumber', 'Feb. 15'],\n # ['docketnumber', '2011.'],\n # ['court', 'Court of Appeals of New York.']]\n # to\n # [['parties', '...'],\n # ['court', 'Court of Appeals of New York.'],\n # ['decisiondate', 'Feb. 15, 2011.']]\n if labels == [None, 'docketnumber', 'docketnumber', 'court']:\n docket_combined = header_els[1].text + \", \" + header_els[2].text\n if date_re.match(docket_combined):\n header_els[1].attrib['class'] = 'decisiondate'\n header_els[1].text = docket_combined\n header_els = [header_els[0], header_els[3], header_els[1]]\n\n # change all author labels but the last to judges; we likely misdetected one earlier\n for i, el in authors[:-1]:\n el.attrib['class'] = \"judges\"\n\n # if we didn't find an author and the last line is unlabeled, assume that's the author with a typo --\n # e.g. 
NW2d/753/753NW2d552_1.xml , missing comma\n if header_els and not authors and not opinion_starts and state >= states[\"judges\"] and header_els[-1].attrib.get('class') is None:\n header_els[-1].attrib['class'] = \"author\"\n authors = [(len(header_els)-1, header_els[-1])]\n\n # move author, and any paragraphs after it, to beginning of first opinion\n move_index = opinion_starts[0][0] + 1 if opinion_starts else authors[-1][0] if authors else None\n if move_index is not None:\n shift_to_opinion(len(header_els)-move_index)\n\n return header_els, opinions", "def render(self):", "def _linked_feature_label(linked_feature):\n\treturn \"\"\"<\n <B>{name}</B><BR />\n F={num_features} D={projected_dim}<BR />\n {fml}<BR />\n <U>{source_translator}</U><BR />\n <I>{source_layer}</I>\n >\"\"\".format(\n\t\tname=linked_feature.name, num_features=linked_feature.size, projected_dim=linked_feature.embedding_dim, fml=linked_feature.fml, source_translator=linked_feature.source_translator, source_layer=linked_feature.source_layer\n\t)", "def get_template(self):\n return self.sep.join([self.htmls[html] for html in self.lang]).format(**self.fields)", "def render_seal(img, text):\n return None", "def render(self):\n canvas_id = 'zdog_{}'.format(self.CANVAS_INDEX)\n illo_id = 'illo_{}'.format(self.CANVAS_INDEX)\n Scene.CANVAS_INDEX += 1\n\n html_lines = []\n\n js_lines = []\n\n euler = -rowan.to_euler(\n self.rotation, convention='xyz', axis_type='intrinsic')\n translation = self.translation*(1, -1, 1)\n\n pan_cfg = self.get_feature_config('pan')\n pan = pan_cfg.get('value', True) if pan_cfg is not None else False\n\n js_lines.append(\"\"\"\n let {illo_id} = new Zdog.Illustration({{\n element: '#{canvas_id}',\n zoom: {zoom},\n dragRotate: {rotation_enabled},\n rotate: {{x: {angle[0]}, y: {angle[1]}, z: {angle[2]}}},\n translate: {{x: {pos[0]}, y: {pos[1]}, z: {pos[2]}}},\n }});\n \"\"\".format(\n illo_id=illo_id, canvas_id=canvas_id, zoom=self.zoom*self.pixel_scale,\n angle=euler, pos=translation,\n rotation_enabled=('false' if pan else 'true')))\n\n config = self.get_feature_config('ambient_light')\n ambient_light = 0 if config is None else config.get('value', .4)\n\n config = self.get_feature_config('directional_light')\n directional_light = ([(0, 0, 0)] if config is None else\n config.get('value', [(0, 0, 0)]))\n directional_light = np.atleast_2d(directional_light)\n\n shapeIndex = 0\n for i, prim in enumerate(self._primitives):\n js_lines.extend(prim.render(\n rotation=self.rotation, illo_id=illo_id,\n name_suffix=i, ambient_light=ambient_light,\n directional_light=directional_light))\n\n (width, height) = map(int, self.size_pixels)\n html_lines.append(\"\"\"\n <canvas id=\"{canvas_id}\" width=\"{width}\" height=\"{height}\"></canvas>\n \"\"\".format(canvas_id=canvas_id, width=width, height=height))\n\n html_lines.append(\"\"\"<script>\n var fill_{canvas_id} = function() {{\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append(LOCAL_HELPER_SCRIPT)\n html_lines.extend(js_lines)\n\n pan_snippet = \"\"\"\n new Zdog.Dragger({{\n startElement: {illo_id}.element,\n onDragStart: function( pointer, moveX, moveY) {{\n this.lastX = 0;\n this.lastY = 0;\n }},\n onDragMove: function( pointer, moveX, moveY ) {{\n let deltax = moveX - this.lastX;\n let deltay = moveY - this.lastY;\n let scale = 1.0/{illo_id}.zoom;\n {illo_id}.translate.x += deltax*scale;\n {illo_id}.translate.y += deltay*scale;\n this.lastX = moveX;\n this.lastY = moveY;\n }}\n }});\"\"\".format(illo_id=illo_id)\n if pan:\n 
html_lines.append(pan_snippet)\n\n html_lines.append(\"\"\"\n let this_canvas = document.querySelector(\"#{canvas_id}\");\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append(\"\"\"\n let animate_{canvas_id} = function() {{\n if(is_in_view(this_canvas))\n {{\n {illo_id}.updateRenderGraph();\n }}\n if(document.contains(this_canvas))\n {{\n requestAnimationFrame(animate_{canvas_id});\n }}\n }};\n animate_{canvas_id}();\"\"\".format(canvas_id=canvas_id, illo_id=illo_id))\n # remove the global reference to this function after using it\n html_lines.append('fill_{canvas_id} = null;'.format(canvas_id=canvas_id))\n html_lines.append('};') # end of fill_{canvas_id}\n # now call fill_{canvas_id}, possibly after loading zdog\n html_lines.append(\"\"\"\n if (typeof Zdog == 'undefined')\n {{\n var script = document.createElement('script');\n script.addEventListener('load', fill_{canvas_id}, false);\n script.src = 'https://unpkg.com/zdog@1/dist/zdog.dist.min.js';\n document.getElementsByTagName('head')[0].appendChild(script);\n }}\n else\n fill_{canvas_id}();\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append('</script>')\n\n return '\\n'.join(html_lines)", "def __repr__(self):\n return self.textual_representation().encode(\"utf-8\")", "def visualizemap(dna, map_view=\"linear\", feature_list=None, start=0, end=None,label_location=None, display_label=2, display_title=True, display_axis=True, fontsize=None, fontsize_nucl=None, \n tick_interval=\"auto\", labelcolor=\"k\", title=None, width_scale=\"auto\", height_scale=1.0, linebreak=None, seq=False, rcseq=False, diamater_scale=1.0, fig= None):\n\n if fontsize is None and map_view == \"linear\":\n fontsize = 12\n elif fontsize is None and map_view == \"circular\":\n fontsize = 10\n else:\n pass \n\n if title is None or title == \"\":\n display_titlee = False\n\n #if map_view == \"circular\":\n #feature_list.sort(key=lambda x:len(dna.printsequence(x.start, x.end)))\n \n standard_scale = 4000\n if map_view == \"circular\":\n figo, ax1, ax2= vc.visualize(dna, format=0, feature_list=feature_list, bottom=400 * diamater_scale, label_visible=display_label, fontsize=fontsize, \n title_visible=display_title, axis_visible=display_axis, tick_space=tick_interval, labelcolor=labelcolor, \n titlename=title, fig=fig)\n try:\n import patchworklib \n _patchworklib = True\n except:\n _patchworklib = False\n \n if _patchworklib == True:\n ax1 = patchworklib.cBrick(ax=ax1)\n ax2 = patchworklib.Brick(ax=ax2)\n if fig == patchworklib.Brick._figure or fig is None:\n return patchworklib.Bricks({ax1.get_label():ax1, ax2.get_label():ax2}) \n else:\n return figo\n else:\n return figo\n else:\n if feature_list is None:\n feature_list = dna.dnafeatures\n figo, ax = vl.visualize(dna, start=start, end=end, feature_list=feature_list, wrap_width=linebreak, annotation_loc=label_location, unvisible_types=[\"source\"], \n visible_types=[], enlarge_w=width_scale, enlarge_h=height_scale, fontsize=fontsize, fontsize_nucl=fontsize_nucl, with_seq=seq, with_seqr=rcseq, nucl_char=None, nucl_color_dict=None, \n label_visible=display_label, scale=\"fix\", title_visible=display_title, axis_visible=display_axis, tick_space=tick_interval, \n labelcolor=labelcolor, titlename=title, fig=fig)\n try:\n import patchworklib \n _patchworklib = True\n except:\n _patchworklib = False\n \n if _patchworklib == True:\n if fig == patchworklib.Brick._figure or fig is None:\n return ax\n else:\n return figo \n else:\n return figo", "def features():\n\n return render_template('features.html')", "def 
get_contacts_plots(itype, ligandonly): \n\n # Creating set_itypes and loading data\n if itype == \"all\":\n set_itypes = set((\"sb\", \"pc\", \"ps\", \"ts\", \"vdw\", \"hp\", \"hb\", \"hbbb\", \"hbsb\", \"hbss\", \"wb\", \"wb2\", \"hbls\", \"hblb\", \"all\"))\n df_raw = None\n for itype_df in set_itypes:\n df_raw_itype = pd.read_csv(str(basepath + \"contact_tables/compare_\" + itype_df + \".tsv\"), sep=\"\\s+\")\n df_raw = pd.concat([df_raw, df_raw_itype])\n else: \n set_itypes = { itype }\n df_raw = pd.read_csv(str(basepath + \"contact_tables/compare_\" + itype + \".tsv\"), sep=\"\\s+\")\n\n print(\"Computing contmaps inputs for %s-%s\" % (itype, ligandonly))\n\n #Loading files\n compl_data = json_dict(str(basepath + \"compl_info.json\"))\n flare_template = json_dict(basepath + \"template.json\")\n\n # Adapting to Mariona's format\n df_original = adapt_to_marionas(df_raw)\n\n # If is working with total frequency and all interaction partners, create a new flareplot template file\n if (itype=='all') and (ligandonly=='prt_lg'):\n flareplot_template(df_original, basepath)\n \n # Filtering out non-ligand interactions if option ligandonly is True\n if ligandonly == \"lg\":\n ligandfilter = df_original['Position'].str.contains('Ligand')\n df_original = df_original[ligandfilter]\n elif ligandonly == \"prt\":\n ligandfilter = ~df_original['Position'].str.contains('Ligand')\n df_original = df_original[ligandfilter]\n\n df_original = filter_same_helix(df_original)\n\n #Removing low-frequency contacts\n df_original = filter_lowfreq(df_original, itype)\n\n #Add \\n between GPCR nomenclatures, to show it multiline in the heatmap axis \n df_original = set_new_axis(df_original)\n\n # Excluding non-standard (and by standard I'm saying \"made by us\", in the simulation rounds) simulations\n (df_standard) = split_by_standard(df_original, compl_data)\n \n #Repeat everything for standartd and non-standard dataframes (our simulations and the simulations from everone in GPCRmd)\n for (stnd,df) in ((\"cmpl\", df_original), (\"stnd\", df_standard)):\n \n #If doesn't exists yet, create base input folder\n options_path = \"%scontmaps_inputs/%s/%s/%s/\" %(basepath, itype, stnd, ligandonly)\n os.makedirs(options_path, exist_ok=True)\n\n # If there are no interactions with this ligandonly-itype combination\n if df.empty:\n print(\"No interactions avalible for this molecular partners and interaction type: %s and %s\" % (ligandonly, itype) )\n return\n\n # Setting columns 'Position', 'leter+Position1' and 'leter+Position2' in df for jsons files \n df_columned = new_columns(df, itype)\n df_columned.to_pickle(options_path+\"dataframe_customflareplot.pkl\")\n\n #Dropping away Position columns, once they are not needed\n df_drop = df.drop(['Position1','Position2'], 1)\n \n # Stack matrix (one row for each interaction pair and dynamic. Colnames are position, dynid and itypes)\n df_ts = stack_matrix(df_drop, set_itypes)\n \n #Dropping away non selected-type interaction rows.\n df_drop = df_drop[df_drop['itype'] == itype]\n df_drop.drop('itype',axis=1, inplace=True)\n\n # Set position as row index of the dataframe\n df_drop = df_drop.set_index('Position') \n\n # Labels for dendogram\n dendlabels_dyns = list(df_drop.columns)\n \n # Making one-simulation flareplots. 
Only done in cmpl to avoid repeating same Simulations\n if stnd == \"cmpl\":\n sim_jsons_path = '%scontmaps_inputs/%s/simulation_jsons/%s/' % (basepath, itype, ligandonly)\n dyn_flareplots(df_columned, sim_jsons_path, dendlabels_dyns, itype, flare_template)\n\n #Computing frequency matrix\n dend_matrix = frequencies(df_drop)\n (recept_info,recept_info_order,df_ts,dyn_gpcr_pdb,index_dict)=improve_receptor_names(df_ts,compl_data)\n \n # Apending column with PDB ids\n pdb_id = recept_info_order['pdb_id']\n df_ts['pdb_id'] = df_ts['Id'].apply(lambda x: recept_info[x][pdb_id])\n \n #Storing dataframe with results in a CSV file, downloadable from web\n create_csvfile(options_path, recept_info,df_drop)\n\n # Add residue types to dataframe\n df_ts = add_restypes(df_ts, compl_data, recept_info, recept_info_order)\n\n #Preparing dendrogram folders and parameters\n dendfolder = options_path + \"dendrograms/\" \n os.makedirs(dendfolder, exist_ok = True)\n dend_height = int( int(df.shape[1]) * 18.5)\n dend_width = 450\n\n # Computing several dendrograms and corresponding json files\n #for cluster in [2]:# DEBUG\n for cluster in list(range(2,21)):\n print(' computing dendrogram with '+str(cluster)+' clusters')\n dendfile = (\"%s%iclusters_dendrogram.html\" % (dendfolder, cluster))\n (dyn_dend_order, clustdict) = dendrogram_clustering(dend_matrix, dendlabels_dyns, dend_height, dend_width, dendfile, cluster, recept_info, recept_info_order)\n # Write dynamicID-cluster dictionary on a json\n clustdir = \"%sflarejsons/%sclusters/\" % (options_path, cluster)\n os.makedirs(clustdir, exist_ok= True)\n with open(clustdir + \"clustdict.json\", 'w') as clusdictfile:\n dump(clustdict, clusdictfile, ensure_ascii=False, indent = 4)\n\n #Jsons for the flareplots of this combinations of clusters\n flareplot_json(df_columned, clustdict, clustdir, flare_template)\n \n #Store Simulation names and dyn on file\n create_dyntoname_file(dyn_dend_order, recept_info, recept_info_order, options_path)\n \n for rev in [\"norev\",\"rev\"]:\n #for rev in [\"norev\"]:# DEBUG\n # If rev option is setted to rev, duplicate all lines with the reversed-position version \n #(4x32-2x54 duplicates to 2x54-4x32)\n if rev == \"rev\":\n df_ts_rev = reverse_positions(df_ts)\n else:\n df_ts_rev = df_ts\n \n df_ts_rev = sort_simulations(df_ts_rev, dyn_dend_order)\n\n #Taking some variables for dataframe slicing\n max_columns = 45\n pairs_number = df_drop.shape[0]\n inter_number = df_ts_rev.shape[0]\n inter_per_pair = (inter_number/pairs_number)/2 if rev == \"rev\" else inter_number/pairs_number \n number_heatmaps = ceil((inter_number/inter_per_pair)/max_columns)\n \n #Create heatmap folder if not yet exists\n heatmap_path_jupyter = settings.MEDIA_ROOT + \"Precomputed/get_contacts_files/contmaps_inputs/%s/%s/%s/heatmaps/%s/\" % (itype,stnd,ligandonly,rev)\n heatmap_path = \"%sheatmaps/%s/\" % (options_path,rev)\n os.makedirs(heatmap_path, exist_ok=True)\n\n #Saving dataframe for future uses in customized heatmaps\n df_ts_rev.to_pickle(heatmap_path+\"dataframe_for_customized.pkl\")\n \n #Make heatmaps each 50 interacting pairs\n div_list = []\n heatmap_filename_list = []\n number_heatmap_list = []\n prev_slicepoint = 0\n for i in range(1,number_heatmaps+1):\n number_heatmap_list.append(str(i))\n\n #Slice dataframe. 
Also definig heigth and width of the heatmap\n slicepoint = int(i*inter_per_pair*max_columns)\n if i == number_heatmaps:\n df_slided = df_ts_rev[prev_slicepoint:]\n else:\n df_slided = df_ts_rev[prev_slicepoint:slicepoint]\n w = int(df_slided.shape[0]/inter_per_pair*20+40)\n prev_slicepoint = slicepoint\n h=dend_height\n\n # Define bokeh figure and hovertool\n \n hover = create_hovertool(itype, itypes_order, hb_itypes, typelist)\n mysource,p = define_figure(w, h, df_slided, hover, itype)\n\n # Creating javascript for side-window\n mysource = select_tool_callback(recept_info, recept_info_order, dyn_gpcr_pdb, itype, typelist, mysource)\n\n # Extract bokeh plot components and store them in lists\n script, div = components(p)\n div_list.append(div.lstrip())\n heatmap_filename = \"%s%iheatmap.html\" % (heatmap_path_jupyter,i)\n heatmap_filename_list.append(heatmap_filename)\n\n # Write heatmap on file\n heatmap_filename = \"%s%iheatmap.html\" % (heatmap_path,i)\n with open(heatmap_filename, 'w') as heatmap:\n heatmap.write(script)\n\n # Write lists as python variables in a python file\n variables_file = \"%svariables.py\" % (heatmap_path)\n with open(variables_file, 'w') as varfile:\n varfile.write(\"div_list = [\\'%s\\']\\n\" % \"\\',\\'\".join(div_list))\n varfile.write(\"heatmap_filename_list = [\\'%s\\']\\n\" % \"\\',\\'\".join(heatmap_filename_list))\n varfile.write(\"number_heatmaps_list = [\\'%s\\']\\n\" % \"\\',\\'\".join(number_heatmap_list))", "def entry_page() -> 'html':\n return render_template('entry.html',\n enry_language_list = l_list,\n\t\t\t\t\t\t enry_language_list_01 = l_list_01,\n the_title='欢迎来到翻译吧')", "def get_html(self):\r\n context = {\r\n 'course_key': self.runtime.course_id,\r\n 'display_name': self.display_name_with_default,\r\n 'tag': self.instructor_tags,\r\n 'source': self.source,\r\n 'instructions_html': self.instructions,\r\n 'content_html': self.content,\r\n 'annotation_storage': self.annotation_storage_url,\r\n 'token': retrieve_token(self.user_email, self.annotation_token_secret),\r\n }\r\n return self.system.render_template('textannotation.html', context)", "def gen_hts_lab_mono(self):\n chosen_fields_lst = ['start', 'stop', 'phone']\n\n self._hts_lab_mono_ttpl = \\\n lst.transp_ttpl(\n tuple(\n tuple(self._proc_ldic[field]) for field in chosen_fields_lst\n )\n )\n\n self._hts_lab_mono_ttpl = tuple(tuple([\"{:>10}\".format(row[0]), \"{:>10}\".format(row[1]), row[2]])\n for row in self._hts_lab_mono_ttpl)\n\n self.hts_lab_mono_prn = tuple(' '.join(l) for l in self._hts_lab_mono_ttpl)", "def setDesign(self, file):\n\n\t\tself.design.clear()\n\t\tself.design.readPDB(file)\n\t\tself.designFile = file\n\n\t\tlig = self.design.ligand\n\t\tif lig == None:\n\t\t\treturn\n\n\t\tprint \"LIGAND: Erep = \",lig.Erep,\"Eatr = \",lig.Eatr,\"EhbSC = \",lig.EhbSC", "def feature_table(chr_id, source, orient, genes, transcripts, cds, exons, unk):\n for gname, ginfo in genes.items():\n line = [str(chr_id), \n 'gbk_to_gff',\n ginfo[3],\n str(ginfo[0]),\n str(ginfo[1]),\n '.',\n ginfo[2],\n '.',\n 'ID='+str(gname)+';Name='+str(gname)+';Note='+ginfo[-1]]\n print '\\t'.join(line) \n ## construct the transcript line is not defined in the original file \n t_line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', ginfo[2], '.'] \n\n if not transcripts:\n t_line.append('ID=Transcript:'+str(gname)+';Parent='+str(gname))\n\n if exons: ## get the entire transcript region from the defined feature\n t_line[3] = str(exons[gname][0][0])\n t_line[4] = str(exons[gname][0][-1])\n elif cds:\n 
t_line[3] = str(cds[gname][0][0])\n t_line[4] = str(cds[gname][0][-1])\n print '\\t'.join(t_line) \n\n if exons:\n exon_line_print(t_line, exons[gname], 'Transcript:'+str(gname), 'exon')\n\n if cds:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'exon')\n\n else: ## transcript is defined \n for idx in transcripts[gname]: \n t_line[2] = idx[3]\n t_line[3] = str(idx[0])\n t_line[4] = str(idx[1])\n t_line.append('ID='+str(idx[2])+';Parent='+str(gname))\n print '\\t'.join(t_line) \n \n ## feature line print call \n if exons:\n exon_line_print(t_line, exons[gname], str(idx[2]), 'exon')\n if cds:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'exon')\n\n if len(genes) == 0: ## feature entry with fragment information \n \n line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', orient, '.'] \n fStart = fStop = None \n\n for eid, ex in cds.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n for eid, ex in exons.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n if fStart or fStart:\n\n line[2] = 'gene'\n line[3] = str(fStart)\n line[4] = str(fStop)\n line.append('ID=Unknown_Gene_' + str(unk) + ';Name=Unknown_Gene_' + str(unk))\n print \"\\t\".join(line)\n\n if not cds:\n line[2] = 'transcript'\n else:\n line[2] = 'mRNA'\n line[8] = 'ID=Unknown_Transcript_' + str(unk) + ';Parent=Unknown_Gene_' + str(unk)\n print \"\\t\".join(line)\n \n if exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n if cds:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'CDS')\n if not exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n unk +=1 \n\n return unk", "def jntToDisplay():\n DISPLAY=\"DISPLAY\"\n # check obj exist\n if pm.objExists(DISPLAY) != 1:\n pm.error(\"no object call DISPLAY\")\n jnt=pm.ls(\"*_ikJnt*\",\"*_fkJnt*\",\"*_ctrlJnt*\",type ='joint')\n for obj in jnt:\n\n pm.delete(obj + \".overrideDisplayType\",icn=1)\n pm.setAttr(obj + \".overrideEnabled\",1)\n pm.setAttr(obj + \".overrideDisplayType\",0)\n pm.connectAttr(DISPLAY + \".ctrlJntDisplay\",obj + \".overrideDisplayType\",f=1)\n pm.setAttr(DISPLAY + \".ctrlJntDisplay\",0) # set to normal\n\n jnt=pm.ls(\"*_skinJnt*\",\"*:*_skinJnt*\",type ='joint')\n for obj in jnt:\n pm.delete(obj + \".overrideDisplayType\",icn=1)\n pm.setAttr(obj + \".overrideEnabled\",1)\n pm.setAttr(obj + \".overrideDisplayType\",0)\n pm.connectAttr(DISPLAY + \".skeletonDisplay\",obj + \".overrideDisplayType\",f=1)\n pm.setAttr(DISPLAY + \".skeletonDisplay\",0) # set to normal\n\n\n pm.setAttr(DISPLAY + \".geoDisplay\",0) # set to normal\n pm.setAttr((\"GEO.overrideEnabled\"),1)\n pm.setAttr((\"GEO.overrideDisplayType\"),0)\n pm.delete((\"GEO.overrideDisplayType\"),icn=1)\n pm.connectAttr((DISPLAY + \".geoDisplay\"),(\"GEO.overrideDisplayType\"),f=1)", "def plot_markup():\n\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n plt.rc('font', size=18)", "def bibtex(self):\n return \"@comment{%(id)s: %(message)s}\" % \\\n {'id': self.id, 'message': self.message}", "def display(self, char, mask=None, isCursorCell=False):\n\n if char == '&':\n char = '&amp;'\n elif char == '<':\n char = '&lt;'\n elif char == '\\t':\n char = '$t'\n\n markup = self.MARKUP_NORMAL\n if isCursorCell:\n markup = markup % self.MARKUP_CURSOR_CELL\n label, = self._displayedChar.get_children()\n label.set_markup(markup % 
char)\n\n if mask in [DOT_7, DOTS_78]:\n self.dot7.raiseDot()\n if mask in [DOT_8, DOTS_78]:\n self.dot8.raiseDot()", "def __init__(self, hilbert):\n super().__init__(DoubledHilbert(hilbert))", "def write_label(self, contig_name, width, height, font, title_width, upper_left, vertical_label,\n strand, canvas, horizontal_centering=False, center_vertical=False, chop_text=True,\n label_color=(50, 50, 50, 255)):\n upper_left = list(upper_left) # to make it mutable\n shortened = contig_name[-title_width:] # max length 18. Last characters are most unique\n txt = Image.new('RGBA', (width, height))#, color=(0,0,0,50))\n txt_canvas = ImageDraw.Draw(txt)\n text_width = txt_canvas.textsize(shortened, font)[0]\n if not chop_text and text_width > width:\n txt = Image.new('RGBA', (text_width, height)) # TODO performance around txt_canvas\n txt_canvas = ImageDraw.Draw(txt)\n if center_vertical or vertical_label: # Large labels are centered in the column to look nice,\n # rotation indicates strand in big text\n vertically_centered = (height // 2) - multi_line_height(font, shortened, txt)//2\n else: # Place label at the beginning of gene based on strand\n vertically_centered = height - multi_line_height(font, shortened, txt) # bottom\n if strand == \"+\":\n vertically_centered = 0 # top of the box\n txt_canvas.multiline_text((0, max(0, vertically_centered)), shortened, font=font,\n fill=label_color)\n if vertical_label:\n rotation_direction = 90 if strand == '-' else -90\n txt = txt.rotate(rotation_direction, expand=True)\n upper_left[1] += -4 if strand == '-' else 4\n if horizontal_centering:\n margin = width - text_width\n upper_left[0] += margin // 2\n canvas.paste(txt, (upper_left[0], upper_left[1]), txt)", "def generate_leaflet(self):\n\n args = {}\n args['title'] = self.options.title.replace('\"', '\\\\\"')\n args['htmltitle'] = self.options.title\n args['south'], args['west'], args['north'], args['east'] = self.swne\n args['centerlon'] = (args['north'] + args['south']) / 2.\n args['centerlat'] = (args['west'] + args['east']) / 2.\n args['minzoom'] = self.tminz\n args['maxzoom'] = self.tmaxz\n args['beginzoom'] = self.tmaxz\n args['tilesize'] = self.tilesize # not used\n args['tileformat'] = self.tileext\n args['publishurl'] = self.options.url # not used\n args['copyright'] = self.options.copyright.replace('\"', '\\\\\"')\n\n s = \"\"\"<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />\n <title>%(htmltitle)s</title>\n\n <!-- Leaflet -->\n <link rel=\"stylesheet\" href=\"http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.css\" />\n <script src=\"http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.js\"></script>\n\n <style>\n body { margin:0; padding:0; }\n body, table, tr, td, th, div, h1, h2, input { font-family: \"Calibri\", \"Trebuchet MS\", \"Ubuntu\", Serif; font-size: 11pt; }\n #map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */\n .ctl {\n padding: 2px 10px 2px 10px;\n background: white;\n background: rgba(255,255,255,0.9);\n box-shadow: 0 0 15px rgba(0,0,0,0.2);\n border-radius: 5px;\n text-align: right;\n }\n .title {\n font-size: 18pt;\n font-weight: bold;\n }\n .src {\n font-size: 10pt;\n }\n\n </style>\n\n </head>\n <body>\n\n <div id=\"map\"></div>\n\n <script>\n /* **** Leaflet **** */\n\n // Base layers\n // .. 
OpenStreetMap\n var osm = L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {attribution: '&copy; <a href=\"http://osm.org/copyright\">OpenStreetMap</a> contributors'});\n\n // .. CartoDB Positron\n var cartodb = L.tileLayer('http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png', {attribution: '&copy; <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a> contributors, &copy; <a href=\"http://cartodb.com/attributions\">CartoDB</a>'});\n\n // .. OSM Toner\n var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {attribution: 'Map tiles by <a href=\"http://stamen.com\">Stamen Design</a>, under <a href=\"http://creativecommons.org/licenses/by/3.0\">CC BY 3.0</a>. Data by <a href=\"http://openstreetmap.org\">OpenStreetMap</a>, under <a href=\"http://www.openstreetmap.org/copyright\">ODbL</a>.'});\n\n // .. White background\n var white = L.tileLayer(\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==\");\n\n // Overlay layers (TMS)\n var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {tms: true, opacity: 0.7, attribution: \"%(copyright)s\"});\n\n // Map\n var map = L.map('map', {\n center: [%(centerlon)s, %(centerlat)s],\n zoom: %(beginzoom)s,\n minZoom: %(minzoom)s,\n maxZoom: %(maxzoom)s,\n layers: [osm]\n });\n\n var basemaps = {\"OpenStreetMap\": osm, \"CartoDB Positron\": cartodb, \"Stamen Toner\": toner, \"Without background\": white}\n var overlaymaps = {\"Layer\": lyr}\n\n // Title\n var title = L.control();\n title.onAdd = function(map) {\n this._div = L.DomUtil.create('div', 'ctl title');\n this.update();\n return this._div;\n };\n title.update = function(props) {\n this._div.innerHTML = \"%(title)s\";\n };\n title.addTo(map);\n\n // Note\n var src = 'Generated by <a href=\"http://www.klokan.cz/projects/gdal2tiles/\">GDAL2Tiles</a>, Copyright &copy; 2008 <a href=\"http://www.klokan.cz/\">Klokan Petr Pridal</a>, <a href=\"http://www.gdal.org/\">GDAL</a> &amp; <a href=\"http://www.osgeo.org/\">OSGeo</a> <a href=\"http://code.google.com/soc/\">GSoC</a>';\n var title = L.control({position: 'bottomleft'});\n title.onAdd = function(map) {\n this._div = L.DomUtil.create('div', 'ctl src');\n this.update();\n return this._div;\n };\n title.update = function(props) {\n this._div.innerHTML = src;\n };\n title.addTo(map);\n\n\n // Add base layers\n L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);\n\n // Fit to overlay bounds (SW and NE points with (lat, lon))\n map.fitBounds([[%(south)s, %(east)s], [%(north)s, %(west)s]]);\n\n </script>\n\n </body>\n </html>\n\n \"\"\" % args # noqa\n\n return s", "def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)", "def templates(self, chans='max', cindex='nidi'):\n core.plot_templates(self, chans=chans, cindex=cindex)", "def formatted_label_string(self):\n return Template(self.zpl_template.template).safe_substitute(self.label_context)", "def Template(Fenetre_largeur,Fenetre_hauteur):\r\n li= Select_ligne(\"Nombre de lignes: \",Fenetre_largeur,Fenetre_hauteur)\r\n nom=\"Template\"\r\n fich=\"Template\"\r\n version=0\r\n while Path(\"stages/\"+fich+\".txt\").is_file() == True:\r\n version+=1\r\n fich=nom+str(version)\r\n fichier=open(\"stages/\"+fich+\".txt\",'w')\r\n fichier.write(str(li))\r\n fichier.write(\"\\n\")\r\n fichier.write(\"\\n\")\r\n for i in range(li):\r\n for 
j in range(10):\r\n fichier.write(\"0,0|\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"gauche: resistance, droite: bonus\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"resistance max: 3\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"6=barre+\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"7=score+\")\r\n fichier.close()", "def show_template_bundles(final_streamlines, template_path, fname):\n import nibabel as nib\n from fury import actor, window\n\n renderer = window.Renderer()\n template_img_data = nib.load(template_path).get_data().astype(\"bool\")\n template_actor = actor.contour_from_roi(\n template_img_data, color=(50, 50, 50), opacity=0.05\n )\n renderer.add(template_actor)\n lines_actor = actor.streamtube(\n final_streamlines, window.colors.orange, linewidth=0.3\n )\n renderer.add(lines_actor)\n window.record(renderer, n_frames=1, out_path=fname, size=(900, 900))\n return", "def _backup_ligand_extraction(self, pdb, resname):\n from rdkit import Chem\n ligand_lines = []\n\n with open(pdb, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n if (line.startswith(\"ATOM\") or line.startswith(\"HETATM\")) and line[17:20].strip() == resname:\n ligand_lines.append(line)\n ligand_block = \"\".join(ligand_lines)\n ligand = Chem.MolFromPDBBlock(ligand_block)\n return ligand", "def generate_labels(pics):\r\n return []", "def node():\n return render_template('nodes.html')", "def abstract(res):\n o = r'\\begin{center}'\n #o += r'\\begin{tikzpicture}[x={(-0.5cm,-0.5cm)}, y={(0.966cm,-0.2588cm)}, z={(0cm,1cm)}, scale=0.6, color={lightgray},opacity=.5]'\n #o += r'\\tikzset{facestyle/.style={fill=lightgray,draw=black,very thin,line join=round}}'\n o += r'\\begin{tikzpicture}' + '\\n'\n o += r'\\draw[blue!40!white] (0,0) rectangle (6,4); \\fill[blue!10!white] (2.4,4) rectangle (3.6,4.15);' + '\\n'\n for i in range(res[7]):\n x,y = random.randrange(2,538)/100.0,random.randrange(2,300)/100.0\n o += r'\\filldraw[fill=green!30!white,draw=white] (%s,%s) rectangle (%s,%s);'%(x,y,(x+0.58),(y+0.38)) + '\\n'\n o += r'\\filldraw[white] (%s,%s) -- (%s,%s);'%((x+0.29),(y+0.19),(x+0.58),(y+0.38)) + '\\n'\n o += r'\\filldraw[white] (%s,%s) -- (%s,%s);'%(x,(y+0.38),(x+0.29),(y+0.19)) + '\\n'\n o += r'\\end{tikzpicture}\\end{center}' + '\\n'\n\n o += r'\\begin{tabular}{|l|l|}' + '\\n' + r'\\hline' + '\\n'\n o += r'Election name& \\texttt{\"%s\"}\\\\'%res[1]\n o += r'Box number& \\texttt{%s}\\\\'%res[2]\n o += r'State& \\texttt{%s}\\\\'%('closed' if res[5] == cloSt else ('vote' if res[5] == votSt else 'register'))\n o += r'Registration end& \\textit{%s}\\\\'%res[3]\n o += r'Vote closing& \\textit{%s}\\\\'%res[4]\n o += r'Casted/registered& $%s/%s$\\\\'%(res[7],res[6]) +'\\n'\n o += r'\\hline\\end{tabular}' + '\\n'\n o += r'\\quad' + '\\n'\n o += r'\\begin{tabular}{|l|l|}\\hline'\n l = res[8].items()\n l.sort(key = operator.itemgetter(1),reverse=True)\n n = 0\n for x in l:\n o += r'%s & %s\\\\'%(r'\\textit{%s}'%x[0] if x[0] == 'White Ballot' else r'\\texttt{%s}'%x[0],x[1]) \n if n == 0:\n o += r'\\hline' + '\\n'\n n += 1\n o += r'\\hline\\end{tabular}' + '\\n'\n\n\n #o += r'\\item The designer\\textquoteright s digital signature of this document (the source file \\texttt{oeu.py}) is: \\tiny $$\\verb!%s!$$ \\normalsize'%rsa(IKey).sign(open(__file__).read()) + '\\n'\n\n return r'\\begin{abstract}' + abstract.__doc__ + r'\\end{abstract}' + o", "def _repr_html_(self):\n return (\n f'<b>GalaxyCluster:</b> {self.unique_id} '\n f'(ra={self.ra}, dec={self.dec}) at z={self.z}'\n 
f'<br>> <b>with columns:</b> {self._str_colnames()}'\n f'<br>> {len(self.galcat)} source galaxies'\n f'<br>{self.galcat._html_table()}'\n )", "def astext(self):\n self.elements.update({\n 'body': u''.join(self.body),\n 'indices': self.generate_indices()\n })\n return self.render('beamer.tex_t', self.elements)", "def make_kml_format(self,kml_template):\n if self.as_type == 'A':\n self.kml_lines = kml_template['good_subdivided']['placemark']\n elif self.as_type == 'B':\n self.kml_lines = kml_template['bad_subdivided']['placemark']\n else:\n print('Unknown airspace type')\n # get idx of name and coordinates\n idxLine = 0\n while idxLine < len(self.kml_lines):\n #print(self.kml_lines[idxLine]\n if self.kml_lines[idxLine].startswith('\\t\\t\\t\\t<name>'): # begin of airspace\n idx_name = idxLine\n if '\\t\\t\\t\\t\\t\\t\\t<coordinates>\\n' in self.kml_lines[idxLine]: # begin of airspace\n idx_coordinates = idxLine+1\n idxLine += 1\n # transform coordinates\n # add all coordinates: Format is:\n # source: 'DP 50:26:22 N 012:17:59 E\\n'\n # target: 9.025830271397426,53.46493577242719,0 8.986157446488383,53.46952117358134,0\n coo_list = [] # collect list of coorinates as strings\n for line in self.txt_lines:\n if line.startswith('AN'):\n self.name = line[3:].replace('\\n','')\n self.kml_lines[idx_name] = '\\t\\t\\t\\t<name>%s</name>\\n' % self.name\n\n if line.startswith('DP'):\n # lon\n lon_deg = float(line[14:17])\n lon_min = float(line[18:20])\n lon_sec = float(line[21:23])\n lon_dec = (lon_sec / 60 + lon_min) / 60 + lon_deg\n if line[24] == 'W':\n lon_dec *= -1 # negative if west\n # lat\n lat_deg = float(line[3:5])\n lat_min = float(line[6:8])\n lat_sec = float(line[9:11])\n lat_dec = (lat_sec / 60 + lat_min) / 60 + lat_deg\n if line[12] == 'S':\n lat_dec *= -1 # negative if west\n # attach coordinates\n coo_list.append('%1.16f,%1.16f,0 ' % (lon_dec,lat_dec))\n # store for later plotting\n self.lat_dec.append(lat_dec)\n self.lon_dec.append(lon_dec)\n\n # make sure that shape is closed --> first an last point must be the same\n if coo_list[0] != coo_list[-1]:\n coo_list.append(coo_list[0])\n self.lat_dec.append(self.lat_dec[0])\n self.lon_dec.append(self.lon_dec[0])\n\n # write coordinate strings into kml\n self.kml_lines[idx_coordinates] = '\\t\\t\\t\\t\\t\\t\\t\\t' # is prefix. 
Coordinates to be added as string below\n for pt in coo_list:\n self.kml_lines[idx_coordinates] += pt\n print('Converted airspace %s' % self.name)", "def venn_diagram(df_NP, taxonomy_Double):\n taxonomy_Single = [list(tax) for tax in df_NP.taxonomy if 'double' not in tax]\n taxonomy_All = taxonomy_Single + taxonomy_Double\n plants = set()\n bacteria = set()\n animals = set()\n fungi = set()\n for tax_list in taxonomy_All:\n if \"plants\" in tax_list:\n for tax in tax_list:\n plants.add(tax.index)\n if \"bacteria\" in tax_list:\n for tax in tax_list:\n bacteria.add(tax.index)\n if \"animals\" in tax_list:\n for tax in tax_list:\n animals.add(tax.index)\n if \"fungi\" in tax_list:\n for tax in tax_list:\n fungi.add(tax.index) \n dic_for_venn = {\"plants\": plants, \"bacteria\": bacteria, \"animals\": animals, \"fungi\": fungi}\n fig= venn.venn(dic_for_venn)\n plt.title(\"venn-diagram from the taxonomy of aglycons\")\n plt.savefig(\"output_data/Venn-Diagram.png\")\n print(\"VENN DIAGRAM DONE\")", "def substituteSLAText(orig, parsed):\n p = orig.parentNode\n doc = orig.ownerDocument\n substitutions = []\n for type, data in parsed:\n if type is RAW:\n substitutions.append(doc.createTextNode(data))\n elif type is QUAL:\n div = doc.createElement('div')\n div.setAttribute('p:property', 'qualifier')\n tn = doc.createTextNode('(' + data + ')')\n div.appendChild(tn)\n substitutions.append(div)\n elif type is FEND:\n div = doc.createElement('div')\n div.setAttribute('p:property', 'frequencyEnd')\n substitutions.extend([div, doc.createTextNode(data)])\n elif type is FSTART:\n div = doc.createElement('div')\n div.setAttribute('p:property', 'frequencyStart')\n div.setAttribute('content', data)\n substitutions.extend([div, doc.createTextNode(data + '-')])\n substituteNodes(orig, substitutions)" ]
[ "0.5465929", "0.5270106", "0.50946057", "0.5082453", "0.5056115", "0.5039706", "0.5006837", "0.4973848", "0.49720326", "0.49520364", "0.4933591", "0.492104", "0.49198148", "0.4917505", "0.4915033", "0.49145043", "0.4857332", "0.4847261", "0.48379815", "0.48348796", "0.48308426", "0.4828999", "0.4826734", "0.48256764", "0.48215148", "0.48004907", "0.47940862", "0.47921744", "0.47598234", "0.4750477", "0.4747372", "0.4745092", "0.47437263", "0.47373694", "0.4725457", "0.4700609", "0.47003037", "0.4698952", "0.4696251", "0.46922055", "0.46901587", "0.46885133", "0.46838114", "0.46836552", "0.46801037", "0.46776223", "0.4676762", "0.46718845", "0.46705252", "0.46495974", "0.46458277", "0.4634153", "0.4630914", "0.4627858", "0.462591", "0.46226853", "0.4622309", "0.46180278", "0.46166745", "0.4604869", "0.4604869", "0.45970854", "0.45962584", "0.45949697", "0.45939234", "0.45888343", "0.4587897", "0.45836055", "0.45825428", "0.45815343", "0.45775792", "0.457452", "0.45744333", "0.4574044", "0.45674965", "0.45630956", "0.45627788", "0.45612812", "0.45604995", "0.45586008", "0.45560327", "0.45541358", "0.45504913", "0.45504752", "0.4546811", "0.45455524", "0.45412967", "0.453566", "0.45343918", "0.45336705", "0.45264333", "0.4525274", "0.45238897", "0.45236725", "0.45226774", "0.4522004", "0.45218307", "0.45214665", "0.45197555", "0.4511489", "0.4509189" ]
0.0
-1
Depict ligand using user-provided templates
def _get_2D_by_template(self, mol):
    results = list()
    try:
        for key, template in self.templates.items():
            temp_mol = Chem.RWMol(mol)
            if temp_mol.HasSubstructMatch(template):
                AllChem.GenerateDepictionMatching2DStructure(temp_mol, template)
                flaws = DepictionValidator(temp_mol).depiction_score()
                results.append(
                    DepictionResult(
                        source=DepictionSource.Template,
                        template_name=key,
                        mol=temp_mol,
                        score=flaws,
                    )
                )
    except Exception:  # if it fails it fails, but generally it wont
        logging.warning("Depiction generation by template failed")

    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data_mrk():\n return render_template(\"l_markers.html\")", "def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'instructions_html': self.instructions,\r\n 'annotation_storage': self.annotation_storage_url,\r\n 'token': retrieve_token(self.user, self.annotation_token_secret),\r\n 'tag': self.instructor_tags,\r\n 'openseadragonjson': self.openseadragonjson,\r\n }\r\n\r\n return self.system.render_template('imageannotation.html', context)", "def tagger():", "def mentions_legales(request):\n\n return render(request, \"products/mentions_legales.html\")", "def machinelearn2():\n return render_template('frontml.html')", "def make_lexicon_txt(self):\n raise NotImplementedError", "def generate_example_label(self):\n label_markup = \"\"\"{keywords}procedure{i} {blocks}Foo{i}\n (An_Integer : {keywords}in out{i} {types}Integer{i} := {numbers}0{i};\n A_String : {types}String{i} := {strings}\"some text\"{i})\n {keywords}with{i} {aspects}Pre => An_Integer >= -1{i};\n{comments}-- Documentation for Foo{i}\n\n---------\n-- Foo --\n---------\n\n{keywords}procedure{i} {blocks}Foo{i}\n (An_Integer : {keywords}in out{i} {types}Integer{i} := {numbers}0{i};\n A_String : {types}String{i} := {strings}\"some text\"{i}) {keywords}is{i}\n{keywords}begin{i}\n {comments}-- Do the actual loop{i}\n\n {keywords}for{i} J {keywords}in{i} A_String'Range {keywords}loop{i}\n Put_Line ({strings}\"bla\"{i} &amp; (A + {numbers}10{i})'Img);\n {keywords}end loop{i};\n{keywords}end{i} {blocks}Foo{i};\n\"\"\"\n # Add line numbers\n num = 1\n prefixed = []\n\n # Compute the preview's gutter foreground color by mixing the editor's\n # foreground and background color.\n # This formula needs to be synchronized with the formula that computes\n # the 'gutter_color' in gps.css.\n gutter_fg_color = self.d['editor_fg'].mix(self.d['editor_bg'], 0.6)\n\n for line in label_markup.splitlines():\n prefixed.append(\n '<span color=\"{}\">{:4d} </span> {}'.format(\n gutter_fg_color.to_hex6_string(),\n num,\n line))\n num = num + 1\n\n font = GPS.Preference(\"Src-Editor-Reference-Style\").get().split(\"@\")[0]\n label_markup = '<span font=\"{}\">'.format(font) + '\\n'.join(\n prefixed) + '</span>'\n\n b = Gtk.HBox()\n label = Gtk.Label()\n b.pack_start(label, False, False, 0)\n _, bg = Gdk.Color.parse(self.d['editor_bg'].to_hex6_string())\n _, fg = Gdk.Color.parse(self.d['editor_fg'].to_hex6_string())\n b.modify_bg(Gtk.StateType.NORMAL, bg)\n label.modify_fg(Gtk.StateType.NORMAL, fg)\n process_dict = {'i': \"</span>\"}\n for key in ['keywords', 'blocks', 'comments',\n 'strings', 'numbers', 'aspects', 'types']:\n val = self.d[key]\n process_dict[key] = '<span {} {} {}>'.format(\n 'color=\"{}\"'.format(val[1].to_hex6_string())\n if val[1].a != 0.0 else '',\n 'background=\"{}\"'.format(val[2].to_hex6_string())\n if val[2].a != 0.0 else '',\n\n 'font-weight=\"BOLD\"' if 'BOLD' in val[0] else '' +\n ' font-style=\"ITALIC\"' if \"ITALIC\" in val[0] else '')\n\n label.set_markup(label_markup.format(**process_dict))\n return b", "def render_knowl_in_template(knowl_content, **kwargs):\n render_me = u\"\"\"\\\n {%% include \"knowl-defs.html\" %%}\n {%% from \"knowl-defs.html\" import KNOWL with context %%}\n {%% from \"knowl-defs.html\" import KNOWL_LINK with context %%}\n {%% from \"knowl-defs.html\" import KNOWL_INC with context %%}\n {%% from \"knowl-defs.html\" import TEXT_DATA with context %%}\n\n %(content)s\n \"\"\"\n knowl_content = md_preprocess(knowl_content)\n\n # markdown enabled\n render_me = 
render_me % {'content': md.convert(knowl_content)}\n # Pass the text on to markdown. Note, backslashes need to be escaped for\n # this, but not for the javascript markdown parser\n try:\n return render_template_string(render_me, **kwargs)\n except Exception as e:\n return \"ERROR in the template: %s. Please edit it to resolve the problem.\" % e", "def forest_code(aut,\n\tname='',\n\tdomain='wrt QNB',\n\tinclude_styles = True,\n\thoriz=True,\n\tLTR=True,\n\tstandalone=True,\n\tdraw_revealing=True,\n):\n\tif name.startswith('$') and not name.endswith('$'):\n\t\traise ValueError(\"Arrow names must end with a $ if they begin with a $.\")\n\t\n\t#1. Decide which domain to use for plotting.\n\tdomain = handle_domain(domain, aut)\n\trange, intersection = intersection_from_domain(domain, aut)\n\tis_repatt_specialised = partial(is_repatt, intersection=intersection, aut=aut)\n\t\n\t# Number the leaves.\n\tdomain = [ ( w, i, draw_revealing and is_repatt_specialised(w, True) )\n\t\tfor (i, w) in enumerate(domain, start=1)]\n\trange = [ ( w, i, draw_revealing and is_repatt_specialised(w, False))\n\t\tfor (i, w) in enumerate(range, start=1) ]\n\t#Order the range using Higman's words\n\trange.sort()\n\t\n\ttemplate = setup()\n\treturn template.render(\n\t\t#options\n\t\tname = name,\n\t\tdomain = domain,\n\t\trange = range,\n\t\thoriz = horiz,\n\t\tstandalone = standalone,\n\t\tinclude_styles = include_styles,\n\t\twrite_word = partial(write_word, intersection = intersection),\n\t\tLTR = LTR\n\t)", "def markov_story():\n return render_template(\"markovstory.html\")", "def _prepare_ligand_template(self, ligand_template: pd.Series) -> oechem.OEMolBase:\n from openeye import oechem\n\n from ..modeling.OEModeling import read_molecules, select_chain, select_altloc, remove_non_protein\n from ..utils import FileDownloader, LocalFileStorage\n\n logging.debug(\"Interpreting structure ...\")\n pdb_path = LocalFileStorage.rcsb_structure_pdb(\n ligand_template[\"structure.pdb_id\"], self.cache_dir\n )\n if not pdb_path.is_file():\n logging.debug(\n f\"Downloading PDB entry {ligand_template['structure.pdb_id']} ...\"\n )\n FileDownloader.rcsb_structure_pdb(ligand_template[\"structure.pdb_id\"], self.cache_dir)\n logging.debug(\"Reading structure ...\")\n ligand_template_structure = read_molecules(pdb_path)[0]\n\n logging.debug(\"Selecting chain ...\")\n ligand_template_structure = select_chain(ligand_template_structure, ligand_template[\"structure.chain\"])\n\n if ligand_template[\"structure.alternate_model\"] != \"-\":\n logging.debug(\"Selecting alternate location ...\")\n try:\n ligand_template_structure = select_altloc(\n ligand_template_structure, ligand_template[\"structure.alternate_model\"]\n )\n except ValueError:\n logging.debug(\n \"Could not find alternate location \"\n f\"{ligand_template['structure.alternate_model']} for PDB entry \"\n f\"{ligand_template['structure.pdb_id']} chain \"\n f\"{ligand_template['structure.chain']}. 
Continuing without selecting \"\n \"alternate location ...\"\n )\n pass\n\n logging.debug(\"Removing everything but protein, water and ligand of interest ...\")\n ligand_template_structure = remove_non_protein(\n ligand_template_structure, exceptions=[ligand_template[\"ligand.expo_id\"]], remove_water=False\n )\n\n logging.debug(\"Adding hydrogens ...\")\n oechem.OEPlaceHydrogens(ligand_template_structure)\n\n return ligand_template_structure", "def gen_row_for_html(usage_flag, image_link_template, error_info_template, document_text, image_list,\n image_idx, landmark_name, landmark_worlds, error_summary, picture_folder='./pictures', width=200):\n image_name = image_list[image_idx]\n image_basename = image_name.split('/')[0]\n case_info = r'<b>Case nunmber</b>:{0} : {1} , '.format(image_idx, image_name)\n\n labelled_images = [image_basename + '_label_lm{}_axial.png'.format(landmark_name),\n image_basename + '_label_lm{}_coronal.png'.format(landmark_name),\n image_basename + '_label_lm{}_sagittal.png'.format(landmark_name)]\n labelled_point = landmark_worlds[0]\n \n if usage_flag == 1:\n error_info = error_info_template.format(landmark_worlds[0][0],\n landmark_worlds[0][1],\n landmark_worlds[0][2])\n \n elif usage_flag == 2:\n detected_images = [image_basename + '_detection_lm{}_axial.png'.format(landmark_name),\n image_basename + '_detection_lm{}_coronal.png'.format(landmark_name),\n image_basename + '_detection_lm{}_sagittal.png'.format(landmark_name)]\n detected_point = landmark_worlds[1]\n\n assert error_summary is not None\n x_error = error_summary.error_dx[landmark_name][image_idx]\n y_error = error_summary.error_dy[landmark_name][image_idx]\n z_error = error_summary.error_dz[landmark_name][image_idx]\n l2_error = error_summary.error_l2[landmark_name][image_idx]\n type_error = error_summary.error_type[landmark_name][image_idx]\n error_info = error_info_template.format(labelled_point[0],\n labelled_point[1],\n labelled_point[2],\n detected_point[0],\n detected_point[1],\n detected_point[2],\n type_error,\n x_error,\n y_error,\n z_error,\n l2_error)\n else:\n raise ValueError('Unsupported flag type!')\n\n document_text = add_document_text(document_text, case_info)\n document_text = add_document_text(document_text, error_info)\n \n document_text += \"\\n\"\n document_text = add_document_text(document_text, \"<table border=1><tr>\")\n document_text = add_three_images(document_text, image_link_template, picture_folder, labelled_images, width)\n if usage_flag == 2:\n document_text = add_three_images(document_text, image_link_template, picture_folder, detected_images, width)\n document_text += \"\\n\"\n document_text = add_document_text(document_text, r'</tr></table>')\n\n return document_text", "def get_gsub_ligature_lookup(font):\n\n # The template might include more lookups after lookup 0, if it has a\n # GSUB table.\n if 'GSUB' not in font:\n ligature_subst = otTables.LigatureSubst()\n ligature_subst.ligatures = {}\n\n lookup = otTables.Lookup()\n lookup.LookupType = 4\n lookup.LookupFlag = 0\n lookup.SubTableCount = 1\n lookup.SubTable = [ligature_subst]\n\n font['GSUB'] = add_emoji_gsub.create_simple_gsub([lookup])\n else:\n lookup = font['GSUB'].table.LookupList.Lookup[0]\n assert lookup.LookupFlag == 0\n\n # importXML doesn't fully init GSUB structures, so help it out\n st = lookup.SubTable[0]\n if not hasattr(lookup, 'LookupType'):\n assert st.LookupType == 4\n setattr(lookup, 'LookupType', 4)\n\n if not hasattr(st, 'ligatures'):\n setattr(st, 'ligatures', {})\n\n return lookup", 
"def render_ents(\n self, text: str, spans: List[Dict[str, Any]], title: Optional[str]\n ) -> str:\n markup = \"\"\n offset = 0\n for span in spans:\n label = span[\"label\"]\n start = span[\"start\"]\n end = span[\"end\"]\n kb_id = span.get(\"kb_id\", \"\")\n kb_url = span.get(\"kb_url\", \"#\")\n kb_link = TPL_KB_LINK.format(kb_id=kb_id, kb_url=kb_url) if kb_id else \"\"\n additional_params = span.get(\"params\", {})\n entity = escape_html(text[start:end])\n fragments = text[offset:start].split(\"\\n\")\n for i, fragment in enumerate(fragments):\n markup += escape_html(fragment)\n if len(fragments) > 1 and i != len(fragments) - 1:\n markup += \"<br>\"\n if self.ents is None or label.upper() in self.ents:\n color = self.colors.get(label.upper(), self.default_color)\n ent_settings = {\n \"label\": label,\n \"text\": entity,\n \"bg\": color,\n \"kb_link\": kb_link,\n }\n ent_settings.update(additional_params)\n markup += self.ent_template.format(**ent_settings)\n else:\n markup += entity\n offset = end\n fragments = text[offset:].split(\"\\n\")\n for i, fragment in enumerate(fragments):\n markup += escape_html(fragment)\n if len(fragments) > 1 and i != len(fragments) - 1:\n markup += \"<br>\"\n markup = TPL_ENTS.format(content=markup, dir=self.direction)\n if title:\n markup = TPL_TITLE.format(title=title) + markup\n return markup", "def features():\n\n return render_template('features.html')", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def label(mi_, ma_):\n\treturn \"caractères Unicode des points de code {} à {}\".format(mi_, ma_)", "def get_html(self):\r\n context = {\r\n 'course_key': self.runtime.course_id,\r\n 'display_name': self.display_name_with_default,\r\n 'tag': self.instructor_tags,\r\n 'source': self.source,\r\n 'instructions_html': self.instructions,\r\n 'content_html': self.content,\r\n 'annotation_storage': self.annotation_storage_url,\r\n 'token': retrieve_token(self.user_email, self.annotation_token_secret),\r\n }\r\n return self.system.render_template('textannotation.html', context)", "def astext(self):\n self.elements.update({\n 'body': u''.join(self.body),\n 'indices': self.generate_indices()\n })\n return self.render('beamer.tex_t', self.elements)", "def rep_legtags(text, footnotes):\n textstring = text\n tagsintext = []\n taggedtextlist = []\n tagtextpat = re.compile(r'\\[/[a-z]\\]')\n tagtextitir = tagtextpat.finditer(textstring)\n for tagfound in tagtextitir:\n closetag = tagfound.group()\n opentag = \"[\" + closetag[2:]\n tag = opentag[1:-1]\n tagsintext.append(tag)\n tagtextlist = get_tagtext(textstring, tag)\n for taggedtext in tagtextlist:\n tagstring = opentag + taggedtext + closetag\n taggedtextlist.append(tagstring)\n for tag in tagsintext:\n tagplace = tagsintext.index(tag)\n replacetext = taggedtextlist[tagplace]\n for footnote in footnotes:\n if footnote[:2] == tag + \" \":\n if \"[/LRep]\" in footnote:\n replacementlist = get_tagtext(footnote, \"LRep\")\n repstring = \"[LRep]\" + replacementlist[0] + \"[/LRep]\"\n textstringlist = textstring.split(replacetext)\n textstring = repstring.join(textstringlist)\n return textstring", "def render(self):", "def render(self, mode='human'):", "def painel():\n return render_template('home/painel.html', title=\"Painel\")", "def gt(text, font=DEFAULT_FONT, color=\"magenta\",\n on_color=None, attr=None, width=80,\n 
justify=\"center\"):\n\n f = Figlet(\n font, width=width,\n justify=justify\n )\n r = f.renderText(text)\n return colored(r, color, on_color, attr)", "def markup_text(self, text):\n for moniker, name in S['names'].items():\n text = text.replace('${0}'.format(moniker.split('_')[1]), name)\n return text", "def definition(request, word_to_lookup):\n return render(request, 'definition.html')", "def generate_gazettes(self):\n # TODO: generate_gazettes\n pass", "def tag(self):\n \n tag = super(self.__class__, self).tag();\n tag = als.tag_join(tag, als.stra(self.strain));\n tag = als.tag_join(tag, als.stra(self.dtype));\n tag = als.tag_join(tag, 'w=%s' % als.stra(self.wid)); \n tag = als.tag_join(tag, 's=%s' % als.stra(self.stage));\n #tag = analysis.tag_join(tag, 'l=%s' % analysis.stra(self.label)); \n\n return tag;", "def render(self, template: str, **vars) -> str:", "def render_seal(img, text):\n return None", "def lyft_labels():\n\n return {\n 0: 'None',\n 7: 'Roads',\n 10: 'Vehicles'\n }", "def info(level):\n if level == 'basic':\n string = _(\"Basic markup\")\n text = _(\"Only basic text tags are available in this input field.\")\n elif level == 'rich':\n string = _(\"Rich markup\")\n text = _(\"Rich and basic text tags are available in this input field.\") \n elif level == 'full':\n string = _(\"Full markup\")\n text = _(\"Every tags are available in this input field.\") \n elif level == 'none':\n string = _(\"No markup\")\n text = _(\"No tags are available in this input field.\") \n\n if level != 'none':\n text = text + \" \" + _(\"Check the markup reminder in related documentation for a description of these tags.\")\n\n return '<span class=\"help\" title=' + quoteattr(text) \\\n + '><img src=\"' + settings.STATIC_MEDIA_URL \\\n + 'images/savane/common/misc.default/edit.png' \\\n + ' border=\"0\" class=\"icon\" alt=\"\" />' \\\n + string + '</span>'", "def tags(docid):\n page = request.args.get('page')\n filename = SETTINGS.LABELED_LOCATION + '/' + docid\n page_text = get_document_page(docid, page)\n if not os.path.isfile(filename):\n return spanify(page_text, page)\n else:\n with open(filename) as tokens_file:\n labels = json.load(tokens_file)\n return spanify(page_text, page, labels)", "def gremlin(self):\r\n initial = '{} = g.makeType().name(\"{}\").{}{}makeEdgeLabel()'\r\n primary_key = ''\r\n if self.primary_key:\r\n primary_key = \"primaryKey({}).\".format(self.primary_key)\r\n\r\n functional = \"functional().\" if self.functional else \"\"\r\n\r\n return initial.format(self.label, self.label, primary_key, functional)", "def label_hemispheres( x, template, templateLR, reg_iterations=[200,50,2,0] ):\n reg = ants.registration(\n ants.rank_intensity(x),\n ants.rank_intensity(template),\n 'SyN',\n aff_metric='GC',\n syn_metric='CC',\n syn_sampling=2,\n reg_iterations=reg_iterations,\n random_seed = 1 )\n return( ants.apply_transforms( x, templateLR, reg['fwdtransforms'],\n interpolator='genericLabel') )", "def generate_lookat_kml_block(self, lng, lat, viewrange):\n return \"\"\"\n <LookAt>\n <longitude>%.14f</longitude>\n <latitude>%.14f</latitude>\n <altitude>0</altitude>\n <range>%.f</range>\n <tilt>0</tilt>\n <heading>0</heading>\n </LookAt>\n\"\"\" % (lng, lat, viewrange)", "def get_template_tag(self):\n return \"{% dataset \" + self.cleantitle + \" %}\"", "def render(self):\n canvas_id = 'zdog_{}'.format(self.CANVAS_INDEX)\n illo_id = 'illo_{}'.format(self.CANVAS_INDEX)\n Scene.CANVAS_INDEX += 1\n\n html_lines = []\n\n js_lines = []\n\n euler = -rowan.to_euler(\n self.rotation, 
convention='xyz', axis_type='intrinsic')\n translation = self.translation*(1, -1, 1)\n\n pan_cfg = self.get_feature_config('pan')\n pan = pan_cfg.get('value', True) if pan_cfg is not None else False\n\n js_lines.append(\"\"\"\n let {illo_id} = new Zdog.Illustration({{\n element: '#{canvas_id}',\n zoom: {zoom},\n dragRotate: {rotation_enabled},\n rotate: {{x: {angle[0]}, y: {angle[1]}, z: {angle[2]}}},\n translate: {{x: {pos[0]}, y: {pos[1]}, z: {pos[2]}}},\n }});\n \"\"\".format(\n illo_id=illo_id, canvas_id=canvas_id, zoom=self.zoom*self.pixel_scale,\n angle=euler, pos=translation,\n rotation_enabled=('false' if pan else 'true')))\n\n config = self.get_feature_config('ambient_light')\n ambient_light = 0 if config is None else config.get('value', .4)\n\n config = self.get_feature_config('directional_light')\n directional_light = ([(0, 0, 0)] if config is None else\n config.get('value', [(0, 0, 0)]))\n directional_light = np.atleast_2d(directional_light)\n\n shapeIndex = 0\n for i, prim in enumerate(self._primitives):\n js_lines.extend(prim.render(\n rotation=self.rotation, illo_id=illo_id,\n name_suffix=i, ambient_light=ambient_light,\n directional_light=directional_light))\n\n (width, height) = map(int, self.size_pixels)\n html_lines.append(\"\"\"\n <canvas id=\"{canvas_id}\" width=\"{width}\" height=\"{height}\"></canvas>\n \"\"\".format(canvas_id=canvas_id, width=width, height=height))\n\n html_lines.append(\"\"\"<script>\n var fill_{canvas_id} = function() {{\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append(LOCAL_HELPER_SCRIPT)\n html_lines.extend(js_lines)\n\n pan_snippet = \"\"\"\n new Zdog.Dragger({{\n startElement: {illo_id}.element,\n onDragStart: function( pointer, moveX, moveY) {{\n this.lastX = 0;\n this.lastY = 0;\n }},\n onDragMove: function( pointer, moveX, moveY ) {{\n let deltax = moveX - this.lastX;\n let deltay = moveY - this.lastY;\n let scale = 1.0/{illo_id}.zoom;\n {illo_id}.translate.x += deltax*scale;\n {illo_id}.translate.y += deltay*scale;\n this.lastX = moveX;\n this.lastY = moveY;\n }}\n }});\"\"\".format(illo_id=illo_id)\n if pan:\n html_lines.append(pan_snippet)\n\n html_lines.append(\"\"\"\n let this_canvas = document.querySelector(\"#{canvas_id}\");\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append(\"\"\"\n let animate_{canvas_id} = function() {{\n if(is_in_view(this_canvas))\n {{\n {illo_id}.updateRenderGraph();\n }}\n if(document.contains(this_canvas))\n {{\n requestAnimationFrame(animate_{canvas_id});\n }}\n }};\n animate_{canvas_id}();\"\"\".format(canvas_id=canvas_id, illo_id=illo_id))\n # remove the global reference to this function after using it\n html_lines.append('fill_{canvas_id} = null;'.format(canvas_id=canvas_id))\n html_lines.append('};') # end of fill_{canvas_id}\n # now call fill_{canvas_id}, possibly after loading zdog\n html_lines.append(\"\"\"\n if (typeof Zdog == 'undefined')\n {{\n var script = document.createElement('script');\n script.addEventListener('load', fill_{canvas_id}, false);\n script.src = 'https://unpkg.com/zdog@1/dist/zdog.dist.min.js';\n document.getElementsByTagName('head')[0].appendChild(script);\n }}\n else\n fill_{canvas_id}();\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append('</script>')\n\n return '\\n'.join(html_lines)", "def template1(self):\n self.indirectobject(1, 0, \"<<\\n /Type /Catalog\\n /Outlines 2 0 R\\n /Pages 3 0 R\\n>>\")\n self.indirectobject(2, 0, \"<<\\n /Type /Outlines\\n /Count 0\\n>>\")\n self.indirectobject(3, 0, \"<<\\n /Type /Pages\\n /Kids [4 0 R]\\n /Count 
1\\n>>\")\n self.indirectobject(4, 0, \"<<\\n /Type /Page\\n /Parent 3 0 R\\n /MediaBox [0 0 612 792]\\n /Contents 5 0 R\\n /Resources <<\\n /ProcSet [/PDF /Text]\\n /Font << /F1 6 0 R >>\\n >>\\n>>\")\n self.indirectobject(6, 0, \"<<\\n /Type /Font\\n /Subtype /Type1\\n /Name /F1\\n /BaseFont /Helvetica\\n /Encoding /MacRomanEncoding\\n>>\")", "def render_string(font, string, features=None, pt_size=128):\n\n cmd = ['hb-view', '--font-size=%d' % pt_size]\n if font.instance_coordinates:\n location = ''\n for axis, val in font.instance_coordinates.items():\n location += '{}={}, '.format(axis, val)\n cmd += ['--variations=%s' % location]\n if features:\n # ignore aalt tag. This feat is used so users can access glyphs\n # via a glyph pallette.\n # https://typedrawers.com/discussion/1319/opentype-aalt-feature\n # glyphsapp will autogen this feature\n cmd += ['--features=%s' % ','.join(features).replace(\"aalt,\", \"\")]\n cmd += [font.path, u'--text={}'.format(string)]\n try:\n img = StringIO(subprocess.check_output(cmd))\n return Image.open(img)\n except FileNotFoundError:\n raise OSError(\n \"hb-view was not found. Check if Harbuzz is installed.\"\n )", "def get_template(self):\n return self.sep.join([self.htmls[html] for html in self.lang]).format(**self.fields)", "def formatted_label_string(self):\n return Template(self.zpl_template.template).safe_substitute(self.label_context)", "def _prov_html(self):\n ret = {\n 'rt_label': self.rt_label,\n 'uri': self.uri,\n 'uri_encoded': self.uri_encoded,\n 'label': self.label,\n 'nid': self.nid,\n 'gat': self.gat,\n 'rs_encoded': self.rs_encoded,\n 'rs_label': self.rs_label,\n 'sa': self.sa,\n 'ea': self.ea\n }\n\n prov_data = self._prov_rdf().serialize(format='turtle')\n\n return render_template(\n 'class_report_prov.html',\n report=ret,\n prov_data=prov_data\n )", "def testPangrams(self):\n from greeking.pangrams import PANGRAMS\n\n languages = list(PANGRAMS.keys())\n for language in languages:\n t = \"{% load greeking_tags %}{% pangram '\" + language + \"' %}\"\n self.render(t)\n self.assertRaises(\n TemplateSyntaxError,\n self.render,\n \"{% load greeking_tags %}{% pangram foobar %}\",\n )\n self.assertRaises(\n TemplateSyntaxError,\n self.render,\n \"{% load greeking_tags %}{% pangram en foobar %}\",\n )\n self.render(\"{% load greeking_tags %}{% pangram %}\")", "def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)", "def lammps_cell_text(structure):\n\n cell_text = f\"\"\"\n0.0 {structure.cell[0, 0]} xlo xhi\n0.0 {structure.cell[1, 1]} ylo yhi\n0.0 {structure.cell[2, 2]} zlo zhi\n{structure.cell[1, 0]} {structure.cell[2, 0]} {structure.cell[2, 1]} xy xz yz\n\"\"\"\n\n return cell_text", "def lInLg(text):\n while re.search(r'(<lg>)(((?!</lg>).)*?)<(/?)p>', text, flags=re.DOTALL|re.IGNORECASE) is not None:\n text = re.sub(r'(<lg>)(((?!</lg>).)*?)<(/?)p>', r'\\1\\2<\\4l>', text, flags=re.DOTALL|re.IGNORECASE)\n return text", "def entry_page() -> 'html':\n return render_template('entry.html',\n enry_language_list = l_list,\n\t\t\t\t\t\t enry_language_list_01 = l_list_01,\n the_title='欢迎来到翻译吧')", "def litchi(args):\n p = OptionParser(litchi.__doc__)\n opts, args, iopts = p.set_image_options(args, figsize=\"9x6\")\n\n if len(args) != 4:\n sys.exit(not p.print_help())\n\n datafile, bedfile, slayout, switch = args\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n\n Synteny(fig, root, datafile, bedfile, slayout, 
switch=switch)\n\n # legend showing the orientation of the genes\n draw_gene_legend(root, 0.4, 0.7, 0.82)\n\n # On the left panel, make a species tree\n fc = \"lightslategrey\"\n\n coords = {}\n xs, xp = 0.16, 0.03\n coords[\"lychee\"] = (xs, 0.37)\n coords[\"clementine\"] = (xs, 0.5)\n coords[\"cacao\"] = (xs, 0.6)\n coords[\"strawberry\"] = (xs, 0.7)\n coords[\"grape\"] = (xs, 0.8)\n xs -= xp\n coords[\"Sapindales\"] = join_nodes(root, coords, \"clementine\", \"lychee\", xs)\n xs -= xp\n coords[\"Rosid-II\"] = join_nodes(root, coords, \"cacao\", \"Sapindales\", xs)\n xs -= xp\n coords[\"Rosid\"] = join_nodes(root, coords, \"strawberry\", \"Rosid-II\", xs)\n xs -= xp\n coords[\"crown\"] = join_nodes(root, coords, \"grape\", \"Rosid\", xs, circle=False)\n\n # Names of the internal nodes\n for tag in (\"Rosid\", \"Rosid-II\", \"Sapindales\"):\n nx, ny = coords[tag]\n nx, ny = nx - 0.01, ny - 0.02\n root.text(nx, ny, tag, rotation=90, ha=\"right\", va=\"top\", color=fc)\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n pf = \"litchi\"\n image_name = pf + \".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)", "def make_new_tag():\n return render_template('tags/new_tag.html')", "def _generate(self, markup=None):\n raise NotImplementedError", "def recog():\n\n return render_template('recog.html')", "def prescription(self):\n prescription = \"\\n{0:>10}\\t{1:>10}\\t{2:>10}\\t{3:>10}\\n\".format(\"R\",\"Material\",\"d\",\"diameter\")\n for surface in self.lensSurfaces():\n prescription += \"{0:>10.2f}\\t{1:>10}\\t{2:>10.2f}\\t{3:>10.2f}\\n\".format(surface.R, str(surface.mat), surface.spacing, surface.diameter)\n return prescription", "def __html__(self, tags:defaultdict) -> str:\n html = \"\"\n\n # Lens detail\n if tags['EXIF LensModel']:\n html += f\"<p class='lens'>{tags['EXIF LensModel']}</p>\\n\"\n \n # Focal length\n if tags['EXIF FocalLengthIn35mmFilm']:\n if tags['EXIF FocalLengthIn35mmFilm'] != tags['EXIF FocalLength']:\n html += f\"<p class='focal-length'>{tags['EXIF FocalLengthIn35mmFilm']}mm (full frame equivalent)</p>\\n\"\n else:\n html += f\"<p class='focal-length'>{tags['EXIF FocalLengthIn35mmFilm']}mm</p>\\n\"\n else:\n if tags['EXIF FocalLength']:\n html += f\"<p class='focal-length'>{tags['EXIF FocalLength']}mm</p>\\n\"\n\n # ISO, Shutter speed, Apperture\n if tags['EXIF ISOSpeedRatings']:\n html += f\"<p class='iso'>ISO {tags['EXIF ISOSpeedRatings']}</p>\\n\"\n if tags['EXIF ExposureTime']:\n html += f\"<p class='shutter-speed'>{tags['EXIF ExposureTime']} Second(s)</p>\\n\"\n if tags['EXIF FNumber']:\n from fractions import Fraction\n tags['EXIF FNumber'] = str(float(Fraction(str(tags['EXIF FNumber'])))) # Convert aperture to str i.e. 
6.3\n html += f\"<p class='aperture'>f{tags['EXIF FNumber']}</p>\\n\"\n\n # Camera body details\n if tags['Image Make'] and tags['Image Model']:\n html += f\"<p class='camera-type'>{tags['Image Make']} {tags['Image Model']}</p>\\n\"\n elif tags['Image Make']:\n html += f\"<p class='camera-type'>{tags['Image Make']}</p>\\n\"\n elif tags[\"Image Model\"]:\n html += f\"<p class='camera-type'>{tags['Image Model']}</p>\\n\"\n else:\n ...\n return html", "def test_starting_template(checker):\n contents = labeled.contents(label=\"template\")\n _ = tomllib.loads(contents)", "def l10n_img(ctx, url):\n return static(l10n_img_file_name(ctx, url))", "def generate_leaflet(self):\n\n args = {}\n args['title'] = self.options.title.replace('\"', '\\\\\"')\n args['htmltitle'] = self.options.title\n args['south'], args['west'], args['north'], args['east'] = self.swne\n args['centerlon'] = (args['north'] + args['south']) / 2.\n args['centerlat'] = (args['west'] + args['east']) / 2.\n args['minzoom'] = self.tminz\n args['maxzoom'] = self.tmaxz\n args['beginzoom'] = self.tmaxz\n args['tilesize'] = self.tilesize # not used\n args['tileformat'] = self.tileext\n args['publishurl'] = self.options.url # not used\n args['copyright'] = self.options.copyright.replace('\"', '\\\\\"')\n\n s = \"\"\"<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />\n <title>%(htmltitle)s</title>\n\n <!-- Leaflet -->\n <link rel=\"stylesheet\" href=\"http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.css\" />\n <script src=\"http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.js\"></script>\n\n <style>\n body { margin:0; padding:0; }\n body, table, tr, td, th, div, h1, h2, input { font-family: \"Calibri\", \"Trebuchet MS\", \"Ubuntu\", Serif; font-size: 11pt; }\n #map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */\n .ctl {\n padding: 2px 10px 2px 10px;\n background: white;\n background: rgba(255,255,255,0.9);\n box-shadow: 0 0 15px rgba(0,0,0,0.2);\n border-radius: 5px;\n text-align: right;\n }\n .title {\n font-size: 18pt;\n font-weight: bold;\n }\n .src {\n font-size: 10pt;\n }\n\n </style>\n\n </head>\n <body>\n\n <div id=\"map\"></div>\n\n <script>\n /* **** Leaflet **** */\n\n // Base layers\n // .. OpenStreetMap\n var osm = L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {attribution: '&copy; <a href=\"http://osm.org/copyright\">OpenStreetMap</a> contributors'});\n\n // .. CartoDB Positron\n var cartodb = L.tileLayer('http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png', {attribution: '&copy; <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a> contributors, &copy; <a href=\"http://cartodb.com/attributions\">CartoDB</a>'});\n\n // .. OSM Toner\n var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {attribution: 'Map tiles by <a href=\"http://stamen.com\">Stamen Design</a>, under <a href=\"http://creativecommons.org/licenses/by/3.0\">CC BY 3.0</a>. Data by <a href=\"http://openstreetmap.org\">OpenStreetMap</a>, under <a href=\"http://www.openstreetmap.org/copyright\">ODbL</a>.'});\n\n // .. 
White background\n var white = L.tileLayer(\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==\");\n\n // Overlay layers (TMS)\n var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {tms: true, opacity: 0.7, attribution: \"%(copyright)s\"});\n\n // Map\n var map = L.map('map', {\n center: [%(centerlon)s, %(centerlat)s],\n zoom: %(beginzoom)s,\n minZoom: %(minzoom)s,\n maxZoom: %(maxzoom)s,\n layers: [osm]\n });\n\n var basemaps = {\"OpenStreetMap\": osm, \"CartoDB Positron\": cartodb, \"Stamen Toner\": toner, \"Without background\": white}\n var overlaymaps = {\"Layer\": lyr}\n\n // Title\n var title = L.control();\n title.onAdd = function(map) {\n this._div = L.DomUtil.create('div', 'ctl title');\n this.update();\n return this._div;\n };\n title.update = function(props) {\n this._div.innerHTML = \"%(title)s\";\n };\n title.addTo(map);\n\n // Note\n var src = 'Generated by <a href=\"http://www.klokan.cz/projects/gdal2tiles/\">GDAL2Tiles</a>, Copyright &copy; 2008 <a href=\"http://www.klokan.cz/\">Klokan Petr Pridal</a>, <a href=\"http://www.gdal.org/\">GDAL</a> &amp; <a href=\"http://www.osgeo.org/\">OSGeo</a> <a href=\"http://code.google.com/soc/\">GSoC</a>';\n var title = L.control({position: 'bottomleft'});\n title.onAdd = function(map) {\n this._div = L.DomUtil.create('div', 'ctl src');\n this.update();\n return this._div;\n };\n title.update = function(props) {\n this._div.innerHTML = src;\n };\n title.addTo(map);\n\n\n // Add base layers\n L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);\n\n // Fit to overlay bounds (SW and NE points with (lat, lon))\n map.fitBounds([[%(south)s, %(east)s], [%(north)s, %(west)s]]);\n\n </script>\n\n </body>\n </html>\n\n \"\"\" % args # noqa\n\n return s", "def visualize(fd, pos_tags=None):\n if pos_tags is not None:\n fd = {t: f for t, f in fd.items() if t.pos in pos_tags}\n color = {pos.tag: color.hex for pos, color in COLOR.items()}\n frequencies = sorted(fd.values())\n font_size = rescale(frequencies, range(75, 351))\n html = '\\n'.join(\n f'''<font\n color=\"{color[t.pos]}\"\n title=\"{t.lemma}/{t.pos} ({f})\"\n style=\"font-size: {font_size(f)}%\"\n >\n {t.lemma}\n </font>''' for t, f in fd.items()\n )\n return html", "def initial_representations():\n cmd.hide('everything', 'all')\n cmd.show('cartoon', 'all')\n cmd.select('ligand', 'resn NFT')\n cmd.deselect()\n cmd.show(\"sticks\", \"ligand\")", "def _add_template(self):\n template_dir = os.path.join(self.label_path, 'standard',\n 'MNI152_T1_2mm_brain.nii.gz')\n template_name = QFileDialog.getOpenFileName(\n self,\n 'Open standard file',\n template_dir,\n 'Nifti files (*.nii.gz *.nii)')\n if not template_name.isEmpty():\n template_path = str(template_name)\n self._add_img(template_path)", "def lang_genoeg(lengte):\n return", "def buildGSUB(self):\n\t\t# Construct GSUB table bottom-up.\n\t\tli_fi = Ligature()\n\t\tli_fi.LigGlyph = 'f_i'\n\t\tli_fi.Component = ['i']\n\t\tli_fi.CompCount = 2\n\n\t\tliSubst = LigatureSubst()\n\t\tliSubst.ligatures = {'f': li_fi}\n\t\tliSubst.Format = 1\n\t\tliSubst.LookupType = 4\n\n\t\tlookup = Lookup()\n\t\tlookup.LookupType = 4 # Ligature\n\t\tlookup.LookupFlag = 0\n\t\tlookup.SubTable = [liSubst]\n\t\tlookup.SubTableCount = len(lookup.SubTable)\n\n\t\tlookupList = LookupList()\n\t\tlookupList.Lookup = [lookup]\n\t\tlookupList.LookupCount = len(lookupList.Lookup)\n\n\t\tfea = Feature()\n\t\tfea.FeatureParams = 
None\n\t\tfea.LookupCount = 1\n\t\tfea.LookupListIndex = [0]\n\n\t\tfeaRecord = FeatureRecord()\n\t\tfeaRecord.FeatureTag = 'liga'\n\t\tfeaRecord.Feature = fea\n\n\t\tfeaList = FeatureList()\n\t\tfeaList.FeatureRecord = [feaRecord]\n\t\tfeaList.FeatureCount = len(feaList.FeatureRecord)\n\n\t\tlangSys = LangSys()\n\t\tlangSys.LookupOrder = None\n\t\tlangSys.ReqFeatureIndex = 0xFFFF\n\t\tlangSys.FeatureIndex = [0]\n\t\tlangSys.FeatureCount = len(langSys.FeatureIndex)\n\n\t\tsct = Script()\n\t\tsct.DefaultLangSys = langSys\n\t\tsct.LangSysRecord = []\n\t\tsct.LangSysCount = len(sct.LangSysRecord)\n\n\t\tsctRec = ScriptRecord()\n\t\tsctRec.ScriptTag = 'tag1'\n\t\tsctRec.Script = sct\n\n\t\tsctList = ScriptList()\n\t\tsctList.ScriptRecord = [sctRec]\n\t\tsctList.ScriptCount = len(sctList.ScriptRecord)\n\n\t\tgsub = GSUB()\n\t\tgsub.LookupList = lookupList\n\t\tgsub.FeatureList = feaList\n\t\tgsub.ScriptList = sctList\n\n\t\ttable = ttLib.newTable('GSUB')\n\t\ttable.table = gsub\n\t\treturn table", "def make_lexicon_words_txt(self):\n raise NotImplementedError", "def generate_tech_preview(self):\n name = self.image.get('name')\n if '/' in name:\n family, name = name.split('/')\n self.image['name'] = \"%s-tech-preview/%s\" % (family, name)\n else:\n self.image['name'] = \"%s-tech-preview\" % name", "def hello_name(user):\r\n return render_template('edurekaHello.html', name = user)", "def _linked_feature_label(linked_feature):\n\treturn \"\"\"<\n <B>{name}</B><BR />\n F={num_features} D={projected_dim}<BR />\n {fml}<BR />\n <U>{source_translator}</U><BR />\n <I>{source_layer}</I>\n >\"\"\".format(\n\t\tname=linked_feature.name, num_features=linked_feature.size, projected_dim=linked_feature.embedding_dim, fml=linked_feature.fml, source_translator=linked_feature.source_translator, source_layer=linked_feature.source_layer\n\t)", "def render( *args, **kwargs ):", "def html_page():\n return render_template('Map_twitter.html')", "def _encode_tilename(self, llx, lly, shortform=False):\n return self.encode_tilename(llx,\n lly,\n self.core.sampling,\n self.core.tiletype,\n shortform=shortform)", "def get_labels(orthographic: str):\n labels = []\n tmp = ''\n tag = False\n\n # Get all labels from orthographic form\n for char in orthographic:\n if char == '[':\n tag = True\n elif char == ']':\n labels.append(tmp)\n tag = False\n tmp = ''\n elif tag:\n tmp += char\n return labels", "def _select_ligand_template(\n self,\n klifs_kinase_id: int,\n ligand: oechem.OEMolBase,\n dfg: Union[str or None],\n ac_helix: Union[str or None],\n ) -> pd.Series:\n import pandas as pd\n\n from ..utils import LocalFileStorage\n\n logging.debug(\"Searching kinase information from KLIFS ...\")\n klifs_kinases = pd.read_csv(LocalFileStorage.klifs_kinase_db(self.cache_dir))\n reference_pocket = klifs_kinases[\n klifs_kinases[\"kinase.klifs_id\"] == klifs_kinase_id\n ][\"kinase.pocket\"].iloc[0]\n reference_pocket = reference_pocket.replace(\"_\", \"\")\n\n logging.debug(\"Retrieve kinase structures from KLIFS for ligand template selection ...\")\n structures = self._get_available_ligand_templates()\n\n if dfg:\n logging.debug(f\"Filtering for ligands bound to a kinase in the DFG {dfg} conformation ...\")\n structures = structures[structures[\"structure.dfg\"] == dfg]\n\n if ac_helix:\n logging.debug(f\"Filtering for ligands bound to a kinase in the alpha C helix {dfg} conformation ...\")\n structures = structures[structures[\"structure.ac_helix\"] == ac_helix]\n\n logging.debug(\"Storing SMILES in structures dataframe ...\")\n 
structures = self._add_smiles_column(structures)\n\n logging.debug(\"Searching for identical co-crystallized ligands ...\")\n identical_ligands = self._get_identical_ligand_indices(ligand, structures[\"smiles\"]) # TODO: Takes surprisingly long\n\n if len(identical_ligands) > 0:\n logging.debug(\"Found identical co-crystallized ligands ...\")\n structures = structures.iloc[identical_ligands]\n logging.debug(\"Searching for matching KLIFS kinase id ...\")\n if (structures[\"kinase.klifs_id\"] == klifs_kinase_id).any():\n logging.debug(\"Found matching KLIFS kinase id ...\")\n structures = structures[structures[\"kinase.klifs_id\"] == klifs_kinase_id]\n else:\n if self.shape_overlay:\n logging.debug(\"Filtering for most similar ligands according to their shape overlay ...\")\n structures = self._filter_for_similar_ligands_3d(ligand, structures)\n else:\n logging.debug(\"Filtering for most similar ligands according to their fingerprints ...\")\n structures = self._filter_for_similar_ligands_2d(ligand, structures)\n\n logging.debug(\"Filtering for most similar kinase pockets ...\")\n structures = self._filter_for_similar_kinase_pockets(reference_pocket, structures)\n\n logging.debug(\"Picking structure with highest KLIFS quality ...\")\n structure_for_ligand = structures.iloc[0]\n\n return structure_for_ligand", "def generateImage(self):\n self.image = self.font.render(self.text, True, self.color)\n self.rect = self.image.get_rect()\n self.rect.center = self.xy", "def templateMappings(self):\n raise NotImplementedError", "def plot_local_imp(parsed_sentence, word_importances, max_alpha=0.5):\n # Prevent special characters like & and < to cause the browser...\n # to display something other than what you intended.\n def html_escape(text):\n return html.escape(text)\n\n word_importances = 100.0 * word_importances / (np.sum(np.abs(word_importances)))\n\n highlighted_text = []\n for i, word in enumerate(parsed_sentence):\n weight = word_importances[i]\n if weight > 0:\n highlighted_text.append(\n '<span style=\"background-color:rgba(135,206,250,'\n + str(abs(weight) / max_alpha)\n + ');\">'\n + html_escape(word)\n + \"</span>\"\n )\n elif weight < 0:\n highlighted_text.append(\n '<span style=\"background-color:rgba(250,0,0,'\n + str(abs(weight) / max_alpha)\n + ');\">'\n + html_escape(word)\n + \"</span>\"\n )\n else:\n highlighted_text.append(word)\n\n highlighted_text = \" \".join(highlighted_text)\n display(HTML(highlighted_text))", "def dftb_geom(name): \n dftb_geom = \"\"\"Geometry = GenFormat {\n <<< \"{{ title }}\"\n }\n \"\"\"\n return Environment().from_string(dftb_geom).render(title=name)", "def enlabel(mi_, ma_):\n\treturn \"Unicode characters from {} to {} codepoints\".format(mi_, ma_)", "def generate_garmin_kml(self, d ):\n return (\"\"\"\n <GroundOverlay>\n <Icon>\n <href>%(image_url)s</href>\n <DrawOrder>%(draw_order)d</DrawOrder>\n </Icon>\n <LatLonBox>\n <north>%(north).14f</north>\n <south>%(south).14f</south>\n <east>%(east).14f</east>\n <west>%(west).14f</west>\n </LatLonBox>\n </GroundOverlay>\"\"\" % d )", "def regression_page():\n return render_template(\"regr-matmortality.html\")", "def get_grades_d3():\n return render_template(\"grades_d3.html\")", "def specify_feature_content(arc_layer, label_dict):\n # get the AnnotationProps, that lead to the Labelrenderer and the Symbol\n feature_layer = change_interface(arc_layer, ArcGisModules.module_carto.IGeoFeatureLayer)\n annotation_parent_layer = change_interface(\n feature_layer.AnnotationProperties,\n 
ArcGisModules.module_carto.IAnnotateLayerPropertiesCollection2\n )\n label_engine = change_interface(\n annotation_parent_layer.Properties(0),\n ArcGisModules.module_carto.ILabelEngineLayerProperties2\n )\n if feature_layer.DisplayFeatureClass.ShapeType == 3:\n label_placement = '2'\n else:\n label_placement = '0'\n label_dict['labelValues']['placement']['placement'] = label_placement\n\n expression = label_engine.Expression\n label_dict['labelValues']['type'] = 'simple'\n label_dict['labelValues']['text-style']['fieldName'] = expression[1:-1]\n\n if annotation_parent_layer.Properties(0).AnnotationMaximumScale > 0.0 \\\n or annotation_parent_layer.Properties(0).AnnotationMinimumScale > 0.0:\n label_dict['labelValues']['rendering']['scaleVisibility'] = '1'\n label_dict['labelValues']['rendering']['scaleMax'] = unicode(\n annotation_parent_layer.Properties(0).AnnotationMinimumScale\n )\n label_dict['labelValues']['rendering']['scaleMin'] = unicode(\n annotation_parent_layer.Properties(0).AnnotationMaximumScale\n )\n\n symbol = label_engine.Symbol\n return symbol", "def Template(Fenetre_largeur,Fenetre_hauteur):\r\n li= Select_ligne(\"Nombre de lignes: \",Fenetre_largeur,Fenetre_hauteur)\r\n nom=\"Template\"\r\n fich=\"Template\"\r\n version=0\r\n while Path(\"stages/\"+fich+\".txt\").is_file() == True:\r\n version+=1\r\n fich=nom+str(version)\r\n fichier=open(\"stages/\"+fich+\".txt\",'w')\r\n fichier.write(str(li))\r\n fichier.write(\"\\n\")\r\n fichier.write(\"\\n\")\r\n for i in range(li):\r\n for j in range(10):\r\n fichier.write(\"0,0|\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"gauche: resistance, droite: bonus\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"resistance max: 3\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"6=barre+\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"7=score+\")\r\n fichier.close()", "def machinelearn():\n return render_template('machinelearn.html')", "def navebarre_prediction(request):\r\n return render(request, 'menu/navebarre_prediction.html')", "def get_template_name(self):\n base = super(ExcerptInline, self).get_template_name()[0]\n path = \"editorial/%s\" % base\n return path", "def write_label(self, contig_name, width, height, font, title_width, upper_left, vertical_label,\n strand, canvas, horizontal_centering=False, center_vertical=False, chop_text=True,\n label_color=(50, 50, 50, 255)):\n upper_left = list(upper_left) # to make it mutable\n shortened = contig_name[-title_width:] # max length 18. 
Last characters are most unique\n txt = Image.new('RGBA', (width, height))#, color=(0,0,0,50))\n txt_canvas = ImageDraw.Draw(txt)\n text_width = txt_canvas.textsize(shortened, font)[0]\n if not chop_text and text_width > width:\n txt = Image.new('RGBA', (text_width, height)) # TODO performance around txt_canvas\n txt_canvas = ImageDraw.Draw(txt)\n if center_vertical or vertical_label: # Large labels are centered in the column to look nice,\n # rotation indicates strand in big text\n vertically_centered = (height // 2) - multi_line_height(font, shortened, txt)//2\n else: # Place label at the beginning of gene based on strand\n vertically_centered = height - multi_line_height(font, shortened, txt) # bottom\n if strand == \"+\":\n vertically_centered = 0 # top of the box\n txt_canvas.multiline_text((0, max(0, vertically_centered)), shortened, font=font,\n fill=label_color)\n if vertical_label:\n rotation_direction = 90 if strand == '-' else -90\n txt = txt.rotate(rotation_direction, expand=True)\n upper_left[1] += -4 if strand == '-' else 4\n if horizontal_centering:\n margin = width - text_width\n upper_left[0] += margin // 2\n canvas.paste(txt, (upper_left[0], upper_left[1]), txt)", "def create_image_caption_pairs(self):", "def gen_html_report(landmarks_list, usage_flag, output_folder):\n labelled_landmarks = landmarks_list[0]\n\n if usage_flag == 2:\n detected_landmarks = landmarks_list[1]\n assert len(labelled_landmarks.keys()) == len(detected_landmarks.keys())\n\n # sort the labelled landmarks according to detection error\n error_summary = error_analysis(labelled_landmarks, detected_landmarks)\n\n landmark_name_list = labelled_landmarks[list(labelled_landmarks.keys())[0]].keys()\n for landmark_idx, landmark_name in enumerate(landmark_name_list):\n print(\"Generating html report for landmark {}: {}.\".format(landmark_idx, landmark_name))\n image_link_template = r\"<div class='content'><img border=0 src= '{0}' hspace=1 width={1} class='pic'></div>\"\n error_info_template = r'<b>Labelled</b>: [{0:.2f}, {1:.2f}, {2:.2f}];'\n document_text = r'\"<h1>check predicted coordinates:</h1>\"'\n document_text += \"\\n\"\n\n if usage_flag == 1:\n image_list = list(labelled_landmarks.keys())\n for image_idx, image_name in enumerate(image_list):\n label_landmark_world = labelled_landmarks[image_name][landmark_name]\n document_text = \\\n gen_row_for_html(usage_flag, image_link_template, error_info_template,\n document_text, image_list, image_idx, landmark_idx,\n [label_landmark_world], None)\n \n elif usage_flag == 2:\n image_list = error_summary.all_cases[landmark_name]\n error_sorted_index = error_summary.error_sorted_index\n for image_idx in error_sorted_index[landmark_name]:\n image_name = image_list[image_idx]\n label_landmark_world = labelled_landmarks[image_name][landmark_name]\n detected_landmark_world = detected_landmarks[image_name][landmark_name]\n error_info_template = r'<b>Labelled</b>: [{0:.2f}, {1:.2f}, {2:.2f}];'\n error_info_template += r'<b>Detected</b>: [{3:.2f}, {4:.2f}, {5:.2f}]; '\n error_info_template += r'<b>Type</b>: {6};'\n error_info_template += r'<b>Error</b>: x:{7:.2f}; y:{8:.2f}; z:{9:.2f}; L2:{10:.2f};'\n document_text = \\\n gen_row_for_html(usage_flag, image_link_template, error_info_template,\n document_text, image_list, image_idx, landmark_name,\n [label_landmark_world, detected_landmark_world],\n error_summary)\n\n else:\n raise ValueError('Undefined usage flag!')\n\n if usage_flag == 1:\n analysis_text = gen_analysis_text(len(image_list), usage_flag,\n 
labelled_landmarks, landmark_name, None)\n\n elif usage_flag == 2:\n analysis_text = gen_analysis_text(len(image_list), usage_flag,\n labelled_landmarks, landmark_name, error_summary)\n\n else:\n raise ValueError('Undefined usage float!')\n\n html_report_name = 'result_analysis.html'\n html_report_folder = os.path.join(output_folder, 'lm{}'.format(landmark_idx))\n if not os.path.isdir(html_report_folder):\n os.makedirs(html_report_folder)\n \n html_report_path = os.path.join(html_report_folder, html_report_name)\n write_html_report_for_single_landmark(document_text, analysis_text, html_report_path, width=200)\n\n if usage_flag == 2:\n summary_csv_report_name = 'summary.csv'\n summary_csv_path = os.path.join(output_folder, summary_csv_report_name)\n write_summary_csv_report_for_all_landmarks(error_summary, summary_csv_path)", "def bb_labelname(hit):\n try:\n real_name = hit.group(1)\n L = Label.objects.get(name=real_name)\n T = loader.get_template('webview/t/label.html')\n C = Context({ 'L' : L })\n return T.render(C)\n except:\n # This will throw if the requested label is spelt incorrectly, or doesnt exist\n return '<img src=\"/static/transmit.png\" alt=\"Invalid Label\" border=\"0\" /> %s' % (real_name)", "def text():\n return {\n \"@context\": \"http://www.w3.org/ns/anno.jsonld\",\n \"type\": \"Annotation\",\n \"body\": {\n \"creator\": \"user\",\n \"type\": \"TextualBody\",\n \"value\": \"string\"\n },\n \"generator\": {\n \"homepage\": \"http://mnemosyne.ml\",\n \"id\": \"string\",\n \"name\": \"Mnemosyne\",\n \"type\": \"Mnemosyne\"\n },\n \"target\": {\n \"id\": \"string\",\n \"type\": \"TextQuoteSelector\",\n \"exact\": \"string\",\n \"format\": \"string\",\n \"source\": \"string\",\n \"prefix\": 0,\n \"suffix\": 0,\n \"refinedBy\": {\n \"type\": \"TextPositionSelector\",\n \"start\": \"/div[2]\",\n \"end\": \"/div[2]\"\n },\n },\n }", "def render_ents(self, text, spans, title):\n markup = ''\n offset = 0\n for span in spans:\n label = span['label']\n start = span['start']\n end = span['end']\n entity = text[start:end]\n fragments = text[offset:start].split('\\n')\n for i, fragment in enumerate(fragments):\n markup += fragment\n if len(fragments) > 1 and i != len(fragments)-1:\n markup += '</br>'\n if self.ents is None or label.upper() in self.ents:\n color = self.colors.get(label.upper(), self.default_color)\n markup += TPL_ENT.format(label=label, text=entity, bg=color)\n else:\n markup += entity\n offset = end\n markup += text[offset:]\n markup = TPL_ENTS.format(content=markup, colors=self.colors)\n if title:\n markup = TPL_TITLE.format(title=title) + markup\n return markup", "def t4_ligase(self, ligase_type=None):\n ligases = {\"neb\": {\"buffer\": self.neb_t4ligase_buffer,\n \"ligase\": self.neb_t4ligase},\n \"thermo\": {\"buffer\": self.thermo_t4ligase_buffer,\n \"ligase\": self.thermo_t4ligase}}\n return ligases.get(ligase_type)", "def testPlaceKittens(self):\n t1 = \"{% load greeking_tags %}{% placekitten 200 200 %}\"\n ctx, out = self.render(t1)\n self.assertEqual(out, '<img src=\"http://placekitten.com/200/200/\"/>')\n self.assertRaises(\n TemplateSyntaxError,\n self.render,\n \"{% load greeking_tags %}{% placekitten foobar %}\",\n )", "def create_label(image_path, camera_name, ptu_angle, IMU_data,):\n\n with exiftool.ExifTool() as et:\n flmeta = et.get_tag('FocalLength', image_path)\n #meta = et.get_metadata(file_path)\n\n #focal_length = '{}'.format(meta['EXIF FocalLength'])\n #focal_length = '{}'.format(meta['XMP:FocalLength'])\n focal_length = '{}'.format(flmeta)\n\n 
#pp = ptu_dict['pp']\n #tp = ptu_dict['tp']\n az = ptu_angle[0]\n el = ptu_angle[1]\n\n try:\n IMU_quaternion = IMU_data[\"quat\"]\n except Exception as e:\n logger.exception(\"Quaternion Data Not Found\")\n logger.exception(e)\n IMU_quaternion = None\n \n yaml_path = os.path.splitext(image_path)[0]\n contents = {\n 'AZIMUTH': az,\n 'ELEVATION': el,\n # 'PP': float(pp),\n # 'TP': float(tp),\n 'f': float(focal_length),\n # 'pr': ptu.pan_res(),\n # 'tr': ptu.tilt_res(),\n # 'temp': ptu.ptu_temp(),\n 'Camera': camera_name,\n 'below values obtained by stereosim IMU:': 'see below',\n '': IMU_data,\n 'IMU_quaternion': IMU_quaternion\n }\n with open('{}.lbl'.format(yaml_path), 'w') as lblfile:\n yaml.dump(contents, lblfile, default_flow_style=False)", "def preview(request):\n ctx = {}\n \n ctx[\"area\"] = \"bookmarks\"\n ctx[\"preview_theme\"] = request.GET.get(\"t\", \"light\")\n ctx[\"bookmarks\"] = Bookmark.by_user(request.user)[:5]\n \n return TemplateResponse(request, \"users/preview.html\", ctx)", "def lime_explanation_text(self, instance_ind, class_names=None):\n return text_explanation_with_lime(self.x_train, instance_ind, self.model, class_name=class_names)", "def index() -> object:\n return render_template('ue_bootstrap.j2', title='UENERGO TAGS')", "def get_mapdata():\n return render_template(\"l_heatmap.html\")", "def generate(net, z, maxlen=50, im=None, init=None, use_end=True):\n caption = lm_tools.sample(net, z['word_dict'], z['index_dict'], num=maxlen, Im=im, initial=init, use_end=use_end)\n print ' '.join(caption)", "def makeTermType(tag, keyword, G):\n string = \"<%s>\" % tag\n\n l = G(\"%sLSL\" % keyword)\n lsym = G(\"%sLSLSymbol\" % keyword)\n s = G(\"%sS\" % keyword)\n mult = G(\"%sLSMultiplicity\" % keyword)\n senior = G(\"%sLSSeniority\" % keyword)\n\n if l and s:\n string += \"<LS>\"\n string += \"<L><Value>%s</Value>\"% l\n if lsym: string += \"<Symbol>%s</Symbol>\" % lsym\n string += \"</L><S>%s</S>\" % s\n if mult: string += \"<Multiplicity>%s</Multiplicity>\" % mult\n if senior: string += \"<Seniority>%s</Seniority>\" % senior\n string += \"</LS>\"\n\n jj = makeiter(G(\"%sJJ\" % keyword))\n if jj:\n string += \"<jj>\"\n for j in jj:\n string += \"<j>%s</j>\" % j\n string += \"</jj>\"\n j1j2 = makeiter(G(\"%sJ1J2\" % keyword))\n if j1j2:\n string += \"<j1j2>\"\n for j in j1j2:\n string += \"<j>%s</j>\" % j\n string += \"</j1j2>\"\n K = G(\"%sK\" % keyword)\n if K:\n string += \"<jK>\"\n j = G(\"%sJKJ\" % keyword)\n if j:\n string += \"<j>%s</j>\" % j\n S2 = G(\"%sJKS\" % keyword)\n if S2:\n string += \"<S2>%s</S2>\" % S2\n string += \"<K>%s</K>\" % K\n string += \"</jK>\"\n l = G(\"%sLKL\" % keyword)\n k = G(\"%sLKK\" % keyword)\n if l and k:\n string += \"<LK>\"\n string += \"<L><Value>%s</Value><Symbol>%s</Symbol></L>\" % (l, G(\"%sLKLSymbol\" % keyword))\n string += \"<K>%s</K>\" % k\n string += \"<S2>%s</S2>\" % G(\"%sLKS2\" % keyword)\n string += \"</LK>\"\n tlabel = G(\"%sLabel\" % keyword)\n if tlabel:\n string += \"<TermLabel>%s</TermLabel>\" % tlabel\n string += \"</%s>\" % tag\n return string", "def img_render(kve, lopt_str, sopt_str, gopt_str, popt_str, glopt_str, img_path):\n i_before = ''\n i_layer = ''\n i_after = ''\n i_label_str_html = ''\n if 'img' in kve:\n img_paths = [x.strip() for x in kve['img'].split(':')]\n for opt_str in glopt_str, popt_str, gopt_str, sopt_str, lopt_str:\n if 'autoilabel' in opt_str:\n i_label_str = os.path.splitext(os.path.basename(img_paths[0]))[0]\n i_label_str_html = ' <div class=\"label bottom\">' \\\n + i_label_str + 
'</div>'\n if 'ilabel' in kve:\n i_label_str = kve['ilabel']\n i_label_str_html = ' <div class=\"label bottom\">' \\\n + i_label_str + '</div>'\n img_tag_str = ''\n for idx, path in enumerate(img_paths):\n img_tag_str = img_tag_str + '<img src=\"' + img_path + img_paths[idx] + '\"/>'\n for opt_str in [glopt_str, popt_str, gopt_str, sopt_str, lopt_str]:\n if 'ibefore' in opt_str:\n i_before = ' <div class=\"layout ' + lopt_str \\\n + '\"><div class=\"img\">' + img_tag_str + '</div>' \\\n + i_label_str_html + '</div>'\n if 'iafter' in opt_str:\n i_after = ' <div class=\"layout ' + lopt_str \\\n + '\"><div class=\"img\">' + img_tag_str + '</div>' \\\n + i_label_str_html + '</div>'\n if not (i_before or i_after):\n i_layer = ' <div class=\"img\">' + img_tag_str + '</div>'\n return i_before, i_layer, i_after\n return '', '', ''", "def ref_tag_preprocess(inp_tag_text):\n\n # some words in references\n inp_tag_text = inp_tag_text.replace(\"до римлян\", \"Римл\")\n inp_tag_text = inp_tag_text.replace(\" и \", \"; \")\n inp_tag_text = inp_tag_text.replace(\" і \", \"; \")\n inp_tag_text = inp_tag_text.replace(\"–\", \"-\")\n # replacing \"'\" to \"’\", it is similar in Ukrainian\n inp_tag_text = inp_tag_text.replace(\"'\", \"’\")\n return inp_tag_text" ]
[ "0.5381906", "0.5355605", "0.5339362", "0.5280232", "0.5247648", "0.52295977", "0.52190715", "0.51928127", "0.51736635", "0.5143805", "0.5141572", "0.5127012", "0.50930816", "0.50833154", "0.50804555", "0.5062896", "0.5048909", "0.50279593", "0.50187063", "0.50133497", "0.5010512", "0.49797472", "0.49714065", "0.4965754", "0.49595279", "0.49549994", "0.49403268", "0.4938844", "0.4934257", "0.49329627", "0.49210343", "0.4911111", "0.49103227", "0.49090385", "0.49071732", "0.48979637", "0.48956564", "0.4889836", "0.4889225", "0.48818815", "0.48814288", "0.48675665", "0.48661348", "0.48657113", "0.48615336", "0.48585817", "0.48552674", "0.48547867", "0.48534536", "0.48500153", "0.4847796", "0.4847634", "0.4847427", "0.4842911", "0.48404598", "0.4829609", "0.48111513", "0.4807814", "0.48058036", "0.4792929", "0.47852105", "0.47841743", "0.47838426", "0.47761548", "0.47692895", "0.47680494", "0.47678804", "0.4767862", "0.4766356", "0.47663113", "0.47642547", "0.4759072", "0.47523832", "0.47476432", "0.4747538", "0.47447887", "0.47422037", "0.4730227", "0.4721368", "0.47174868", "0.47159475", "0.4715262", "0.47066748", "0.46969947", "0.4695577", "0.46951514", "0.46938452", "0.4687098", "0.46869326", "0.46833766", "0.46815643", "0.46802825", "0.46749532", "0.46680272", "0.46637636", "0.4662005", "0.46577066", "0.46545625", "0.46509448", "0.4648521", "0.46449116" ]
0.0
-1
Return True if the two bonds collide, False otherwise. Note that False is returned even when the bonds share a common atom, as this is not a problem case. Cramer's rule is used to solve the system of linear equations.
def _intersection(self, bondA, bondB):
    atoms = [
        bondA.GetBeginAtom(),
        bondA.GetEndAtom(),
        bondB.GetBeginAtom(),
        bondB.GetEndAtom(),
    ]
    names = [a.GetProp("name") for a in atoms]
    points = [self.conformer.GetAtomPosition(a.GetIdx()) for a in atoms]

    vecA = Geometry.Point2D(points[1].x - points[0].x, points[1].y - points[0].y)
    vecB = Geometry.Point2D(points[3].x - points[2].x, points[3].y - points[2].y)

    # we need to set up directions of the vectors properly in case
    # there is a common atom. So we identify angles correctly
    # e.g. B -> A; B -> C and not A -> B; C -> B.
    if len(set(names)) == 3:
        angle = self.__get_angle(names, vecA, vecB)
        return angle < 10.0

    # Cramer's rule to identify intersection
    det = vecA.x * -vecB.y + vecA.y * vecB.x

    if round(det, 2) == 0.00:
        return False

    a = points[2].x - points[0].x
    b = points[2].y - points[0].y

    detP = (a * -vecB.y) - (b * -vecB.x)
    p = round(detP / det, 3)

    if p < 0 or p > 1:
        return False

    detR = (vecA.x * b) - (vecA.y * a)
    r = round(detR / det, 3)

    if 0 <= r <= 1:
        return True

    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_bond_crossing(self):\n return self.count_bond_collisions() > 0", "def collide(b1,b2):\n if mag(b1.pos-b2.pos) < (b1.radius + b2.radius - .05):\n return True", "def accurate_collision(self, other) -> bool:\r\n if self.collide:\r\n if self.bbox_intersect(other):\r\n offset = round(self.x - other.x), \\\r\n round(self.y - other.y)\r\n if self.mask.overlap(other.mask, offset): # Overlap returns None or 1 point\r\n return True\r\n return False\r\n else:\r\n return False", "def ifCollide( ball1, ball2 ):\n\t\n\tb1_x, b1_y = ball1.position.xy\n\tb2_x, b2_y = ball2.position.xy\n\t\n\t#vector connect center of particles\n\tdistant = Vector.from_points((b2_x, b2_y), (b1_x, b1_y))\n\t\n\t#if lenght of vector above is less( equal ) than sum of radius ( they overlapping )\n\tif ( ball1.r + ball2.r ) ** 2 >= distant.norm():\n\t\treturn True\n\telse:\n\t\treturn False", "def _resolve_ball_collisions(self) -> bool:\n\n bln_naughty = True\n lng_naughty_loop_count = 0\n lng_naughty_loop_limit = 10\n while bln_naughty:\n lng_naughty_loop_count += 1\n if lng_naughty_loop_count > lng_naughty_loop_limit:\n return False\n bln_naughty = False\n\n \"\"\" Ball vs Ball \"\"\"\n for sprBall1, sprBall2 in TrashyPhysics.collision_pairs_self(\n self.grpBalls, fncCollided=TrashyPhysics.balls_collided):\n bln_naughty = True\n TrashyPhysics.bounce_balls(sprBall1, sprBall2)\n\n \"\"\" Ball vs Bot \"\"\"\n for sprBall, sprRobot in TrashyPhysics.collision_pairs(\n self.grpBalls, self.grpRobots,\n fncCollided=TrashyPhysics.ball_robot_collided):\n bln_naughty = True\n TrashyPhysics.bounce_ball_off_bot(sprRobot, sprBall)\n\n \"\"\" Ball vs Wall \"\"\"\n for ball in filter(lambda x: TrashyPhysics.collided_wall(x), self.lstBalls):\n bln_naughty = True\n TrashyPhysics.bounce_ball_off_wall(ball)\n\n \"\"\" Ball vs Bumper \"\"\"\n # todo\n\n return True", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def collides(self, other):\r\n for block in self.blocks:\r\n for obstacle in other.blocks:\r\n if block.col == obstacle.col and block.row == obstacle.row:\r\n return True\r\n return False", "def collides_with(self, bird):\n return pygame.sprite.collide_mask(self, bird)", "def check_bond(atom1, atom2):\n check = False\n for bond in bond_list:\n if (((bond.identity == get_bond_id(atom1, atom2)[0]) or (bond.identity == get_bond_id(atom1, atom2)[1])) and 0.975 * bond.length <= calculate_3D_distance_2_atoms(atom1, atom2) <= 1.025 * bond.length):\n check = True\n break\n return check", "def detect_collision():\n # with the top and bottom of screen\n if ball.ycor() > GAME_Y_BARRIER or ball.ycor() < -GAME_Y_BARRIER:\n ball.bounce_y()\n # with the paddles\n if ball.distance(paddle_right) < 50 and ball.xcor() > GAME_X_BARRIER \\\n or ball.distance(paddle_left) < 50 and ball.xcor() < -GAME_X_BARRIER:\n ball.bounce_x()", "def collision_check(self):\n return True", "def collides_with(self, other):\n\t\tdistance = self.position.distance_to(other.position) # Vector2.distance_to()\n\t\treturn distance < self.radius + other.radius", "def has_collide(self, obj):\n rect1 = self.anim.getRect()\n rect2 = obj.anim.getRect()\n \n rect1.move_ip(self.pos)\n rect2.move_ip(obj.pos)\n \n return rect1.colliderect(rect2)", "def _valid_bond_pair(self, set):\n (sbu1, cp1), (sbu2, cp2) = set\n if all([i is None for i in [cp1.special, cp2.special, cp1.constraint, cp2.constraint]]):\n return sbu1.is_metal != sbu2.is_metal\n\n return (cp1.special == 
cp2.constraint) and (cp2.special == cp1.constraint)", "def is_collision_conf(self, q: np.ndarray) -> bool:\n for obs in self.obstacles:\n if np.fabs(q[2]-obs[0]) <= obs[3] and np.fabs(q[0]) <= obs[1] and np.fabs(q[1]) <= obs[2]:\n return True\n return False", "def is_balanced(self):\n\n all_contact = (self.feet_contact == np.ones(self.feet_contact.shape)).all()\n none_contact = (self.feet_contact == np.zeros(self.feet_contact.shape)).all()\n if all_contact or none_contact:\n # If _both_ or _neither_ foot are in contact, check if centre of mass lies in bounding box\n left_x, right_x = float(\"inf\"), -float(\"inf\")\n for i, in_contact in enumerate(self.feet_contact):\n foot = self.feet[i]\n if in_contact:\n # First foot is \"foot_left_geom\", second is \"foot_geom\"\n contacts = foot.contact_list() # Non-empty if foot touching ground\n contact_info = contacts[0] # Ground\n # Position 6 contains (x,y,z) of contact in world coordinates\n x, _, _ = contact_info[6]\n left_x = x if x <= left_x else left_x\n right_x = x if x >= right_x else right_x\n else:\n x, _, _ = foot.pose().xyz()\n left_x = x if x <= left_x else left_x\n right_x = x if x >= right_x else right_x\n\n # Centre of mass\n x, _, _ = self.body_xyz\n # mask return 1 for foot not in contact\n mask = np.array([0. if x <= right_x else 1., 0. if left_x <= x else 1.])\n return left_x <= x <= right_x, mask\n else:\n # If one of the feet is not in contact, we want body to not sway past the contacting foot\n com_vx, _, _ = self.robot_body.speed()\n com_x, _, _ = self.body_xyz\n\n # mask return 1 for foot not in contact\n mask = np.ones(self.feet_contact.shape) - self.feet_contact\n\n if self.feet_contact[0] == 1:\n # Right foot in contact\n foot = self.feet[0]\n else:\n # Left foot in contact\n foot = self.feet[1]\n\n # Get contact info\n contacts = foot.contact_list()\n contact_info = contacts[0]\n # Position 6 contains (x,y,z) of contact in world coordinates\n foot_x, _, _ = contact_info[6]\n foot_vx, _, _ = foot.speed()\n\n if self.feet_contact[0] == 1:\n # Right foot in contact\n # 1. CoM on the left side of right foot\n # 2. 
CoM on the right side, but is moving to towards balance\n delta_x = com_x - foot_x\n delta_nx = (com_x + com_vx) - (foot_x + foot_vx)\n return delta_x <= 0 or (delta_x > 0 and delta_nx <= delta_x), mask\n else:\n # Left foot in contact\n delta_x = com_x - foot_x\n delta_nx = (com_x + com_vx) - (foot_x + foot_vx)\n return delta_x >= 0 or (delta_x < 0 and delta_nx >= delta_x), mask", "def check_collision(self, a, b):\n\n dis_x = abs((a.x+a.r + a.dx)-(b.x+b.r + b.dx))\n dis_y = abs((a.y+a.r + a.dy)-(b.y+b.r + b.dy))\n distance = math.sqrt(dis_x*dis_x + dis_y*dis_y)\n\n if distance <= (b.r + a.r) and (a.colliding == False or b.colliding == False):\n\n return True", "def is_in_collision(self, molecule):\n if self is molecule: # a molecule can't collide with itself\n return False\n return (self.position - molecule.position).length() < 2 * Molecule.radius", "def collide(obj1, obj2):\n offset_x = obj2.x - obj1.x #The difference between obj1 and obj 2\n offset_y = obj2.y - obj1.y \n return obj1.mask.overlap(obj2.mask, (int(offset_x), int(offset_y))) != None # (x,y)", "def circles_collide(x1: float, y1: float, r1: float, x2: float, y2: float, r2: float) -> bool:\n return distance_between_sq(x1, y1, x2, y2) <= (r1 + r2)**2", "def count_bond_collisions(self):\n\n errors = 0\n\n for i in range(0, len(self.bonds)):\n for a in range(i + 1, len(self.bonds)):\n result = self._intersection(self.bonds[i], self.bonds[a])\n\n if result:\n errors += 1\n return errors", "def check_if_no_bond(atom1, atom2, bond_list, bond_generic):\n check = False\n for bond in bond_list:\n if ((bond.identity == get_bond_id(atom1, atom2)[0]) or (bond.identity == get_bond_id(atom1, atom2)[1]) and calculate_3D_distance_2_atoms(atom1, atom2) > 1.05 * bond.length):\n check = True\n for bond in bond_generic:\n if (((atom1.atom_name[0] + atom2.atom_name[0]) == bond.identity) or (atom2.atom_name[0] + atom1.atom_name[0] == bond.identity) and (calculate_3D_distance_2_atoms(atom1, atom2) > 1.05 * bond.length)):\n check = True \n return check", "def has_crossing_len2_ob(self) -> bool:\n fcell = self.first_cell\n scell = self.second_cell\n if self._fuse_row:\n possible_obs = [\n GriddedPerm((0, 1), (fcell, scell)),\n GriddedPerm((1, 0), (scell, fcell)),\n ]\n else:\n possible_obs = [\n GriddedPerm((0, 1), (fcell, scell)),\n GriddedPerm((1, 0), (fcell, scell)),\n ]\n return any(ob in possible_obs for ob in self._tiling.obstructions)", "def check_for_collision(self, obj: Union[Pipe, Base], base: Base) -> bool:\n bird_mask = self.get_mask()\n for mask, top in zip(obj.get_mask(), obj.get_tops()):\n if bird_mask.overlap(mask, (round(obj.x) - self.x, top - int(self.y))):\n return True\n\n # Collision with ground\n if self._rotate_image()[1].colliderect(base.rect):\n return True\n return False", "def _check_for_bonus_collision(self, player):\n for bonus_index, bonus in enumerate(self.bonuses):\n if pygame.sprite.collide_mask(bonus, player):\n self._activate_bonus(bonus.type, player)\n del self.bonuses[bonus_index]\n return True\n return False", "def intersects(self, other: \"BB\") -> bool:\n return bool(lib.cpBBIntersects(self, other))", "def box_collision(self):\n border_box_pos_1 = self.box_1.x + self.box_1.width/2\n border_box_pos_2 = self.box_2.x - self.box_2.width/2\n\n if (border_box_pos_2 - border_box_pos_1) <= 0:\n return True\n else:\n return False", "def check_collision(self):\n if self.window.get_object_at(self.ball.x,self.ball.y+self.radius*2) is self.paddle:\n self.bounce()\n if 
self.window.get_object_at(self.ball.x+self.radius*2,self.ball.y+self.radius*2) is self.paddle:\n self.bounce()", "def validBond(index1, index2, direction):\n #print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\n cell1 = index1/numAtomsPerCell\n cell2 = index2/numAtomsPerCell\n #Find the coordinates of the cell in units of interaction cells\n posInX1 = int(cell1/(size*size))\n posInX2 = int(cell1/(size*size))\n leftover1 = cell1%(size*size)\n leftover2 = cell2%(size*size)\n posInY1 = int(leftover1/size)\n posInY2 = int(leftover2/size)\n posInZ1 = leftover1%size\n posInZ2 = leftover2%size\n \n #Now, a valid interaction can cross an interaction cell boundary in any direction,\n #but it has a maximum length of one interaction cell. However, I have made the minimum\n #size of this larger translated lattice equal to 3*3*3 interaction cells. Therefore,\n #when we hit an edge and get in invalid interaction, the cells will be at least 2\n #interaction cells apart in the direction of the interaction.\n if(direction[0]):\n if numpy.abs(posInX1 - posInX2)>1:\n #print \"false\"\n return False\n if(direction[1]):\n if numpy.abs(posInY1 - posInY2)>1:\n #print \"false\"\n return False\n if(direction[2]):\n if numpy.abs(posInZ1 - posInZ2)>1:\n #print \"false\"\n return False\n print #\"true\"\n return True\n\n #Old (incorrect) method:\n if 0:\r\n print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\r\n cell1 = index1/numAtomsPerCell\r\n cell2 = index2/numAtomsPerCell\r\n zRow1 = cell1/size#this relies on the list being created in the nested for loop that was used, z within y within x\r\n zRow2 = cell2/size\r\n if(zRow1 != zRow2 and direction[2]):\n print \"false\"\r\n return False\r\n xLayer1 = cell1/(size*size)\r\n xLayer2 = cell2/(size*size)\r\n if(xLayer1 != xLayer2 and direction[1]):\n print \"false\"\r\n return False\r\n #shouldn't have to check z, because if it's not valid in z direction, it would be off the list (>len(allAtoms))\n print \"true\"\r\n return True", "def is_equivalence(self) -> bool:", "def check_bonds(self, force0, force1):\n\n assert type(force0) == type(force1), \"Error: force0 and force1 must be the same type.\"\n assert type(force0) == mm.HarmonicBondForce, \"Error: forces must be HarmonicBondForces\"\n\n n_bonds0 = force0.getNumBonds()\n n_bonds1 = force1.getNumBonds()\n\n dict0, dict1 = {}, {}\n\n i0, i1, r0, k0 = force0.getBondParameters(0)\n unit_r = u.angstrom\n #unit_k = k0.unit\n unit_k = u.kilojoules_per_mole/(u.angstrom)**2\n\n for k in range(n_bonds0):\n i0, i1, r0, k0 = force0.getBondParameters(k)\n i0, i1 = reorder_bonds(i0, i1)\n if k0 / k0.unit != 0.0: # Skip forces with strength 0.0\n dict0[i0, i1] = ((r0 / unit_r, k0 / unit_k))\n\n for k in range(n_bonds1):\n i0, i1, r0, k0 = force1.getBondParameters(k)\n i0, i1 = reorder_bonds(i0, i1)\n if k0 / k0.unit != 0.0: # Skip forces with strength 0.0\n dict1[i0, i1] = ((r0 / unit_r, k0 / unit_k))\n\n keys0 = set(dict0.keys())\n keys1 = set(dict1.keys())\n logger.info(\"Bonds0 - Bonds1 = %s\" % (keys0.difference(keys1)))\n logger.info(\"Bonds1 - Bonds0 = %s\" % (keys1.difference(keys0)))\n assert set(dict0.keys()) == set(dict1.keys()), \"Systems have different HarmonicBond Forces\"\n\n for k, parameter_name in enumerate([\"r0\", \"k0\"]):\n for (i0, i1) in dict0.keys():\n val0 = dict0[i0, i1][k]\n val1 = dict1[i0, i1][k]\n if parameter_name=='r0':\n assert compare(val0, val1), \"Error: Harmonic Bond distance (%d, %d) has equilibrium distances of %f and 
%f angstroms, respectively.\" % (i0, i1, val0, val1)\n else:\n assert compare(val0, val1), \"Error: Harmonic Bond force constant (%d, %d) has values of %f and %f kJ/mol, respectively.\" % (i0, i1, val0, val1)", "def checkBallCollision(self, ball):\n\t\tres, cornx, corny = ball.collWithRect(self.bbox)\n\t\t# print res,cornx, corny\n\t\tif res==0:\n\t\t\treturn False\n\n\t\tleft = self.x-self.length/2\n\t\tif ball.x >= left and ball.x <= self.x+self.length/2:\n\t\t\t#bounce normally\n\t\t\tmagnitude = math.sqrt(ball.vx**2+ball.vy**2)\n\t\t\tfor i in range(1, 7):\n\t\t\t\tif ball.x < left+self.length/7*i:\n\t\t\t\t\tangle = math.radians(120.-10.*(i-1))\n\t\t\t\t\tball.vx = magnitude*math.cos(angle)\n\t\t\t\t\tball.vy = -magnitude*math.sin(angle)\n\t\t\t\t\treturn True\n\t\t\t#maximum right\n\t\t\tangle = math.radians(60.)\n\t\t\tball.vx = magnitude*math.cos(angle)\n\t\t\tball.vy = -magnitude*math.sin(angle)\n\t\t\treturn True\n\t\t\treturn True\n\n\t\t#bounce with the caps\n\t\tif ball.collWithCircle(self.x-self.length/2, self.y, self.height/2):\n\t\t\tball.bouncePoint(self.x-self.length/2, self.y)\n\t\t\treturn True\n\t\telif ball.collWithCircle(self.x+self.length/2, self.y, self.height/2):\n\t\t\tball.bouncePoint(self.x+self.length/2, self.y)\n\t\t\treturn True\n\t\treturn False", "def isUnbondedAtomConnected (self,spike): \r\n for i in range(len(self.metaSpikes)):\r\n if self.metaSpikes[i].typeSpike == 1:\r\n for j in range(len(self.metaSpikes[i].danglingNodeList)):\r\n if self.metaSpikes[i].danglingNodeList[j].spike == spike and self.metaSpikes[i].danglingNodeList[j].bonded == True:\r\n # If spike has a dangling node and the this is bonded then return True\r\n return True\r\n else:\r\n for j in range(len(self.metaSpikes[i].danglingTailList)):\r\n if self.metaSpikes[i].danglingTailList[j].spike == spike and self.metaSpikes[i].danglingTailList[j].bonded == True:\r\n # If spike has a dangling tail and the this is bonded then return True\r\n return True\r\n # If True has not been returned by this point either spike has no dangling tails or nodes or if it does have them then none\r\n # are bonded\r\n return False", "def check_nonbonded(self, force0, force1):\n\n assert type(force0) == type(force1), \"Error: force0 and force1 must be the same type.\"\n assert type(force0) == mm.NonbondedForce, \"Error: forces must be NonbondedForces\"\n assert force0.getNumParticles() == force1.getNumParticles(), \"Error: Systems have %d and %d particles in NonbondedForce, respectively.\" % (force0.getNumParticles(), force1.getNumParticles())\n\n n_atoms = force0.getNumParticles()\n\n q, sigma, epsilon = force0.getParticleParameters(0)\n #unit_q, unit_sigma, unit_epsilon = q.unit, sigma.unit, epsilon.unit\n unit_q = u.elementary_charge\n unit_sigma = u.angstrom\n unit_epsilon = u.kilojoule_per_mole\n\n for k in range(n_atoms):\n q0, sigma0, epsilon0 = force0.getParticleParameters(k)\n q1, sigma1, epsilon1 = force1.getParticleParameters(k)\n\n q0, sigma0, epsilon0 = q0 / unit_q, sigma0 / unit_sigma, epsilon0 / unit_epsilon\n q1, sigma1, epsilon1 = q1 / unit_q, sigma1 / unit_sigma, epsilon1 / unit_epsilon\n\n assert compare(q0, q1), \"Error: Particle %d has charges of %f and %f, respectively.\" % (k, q0, q1)\n\n if epsilon0 != 0.:\n assert compare(sigma0, sigma1), \"Error: Particle %d has sigma of %f and %f angstroms, respectively.\" % (k, sigma0, sigma1)\n else:\n logger.info(\"Skipping comparison of sigma (%f, %f) on particle %d because epsilon has values %f, %f kJ/mol\" % (sigma0, sigma1, k, epsilon0, 
epsilon1))\n\n assert compare(epsilon0, epsilon1), \"Error: Particle %d has epsilon of %f and %f kJ/mol, respectively.\" % (k, epsilon0, epsilon1)\n\n n_exceptions = force0.getNumExceptions()\n assert force0.getNumExceptions() == force1.getNumExceptions(), \"Error: Systems have %d and %d exceptions in NonbondedForce, respectively.\" % (force0.getNumExceptions(), force1.getNumExceptions())\n\n i0, i1, qq, sigma, epsilon = force0.getExceptionParameters(0)\n unit_qq = u.elementary_charge**2\n unit_sigma = u.angstrom\n unit_epsilon = u.kilojoule_per_mole\n\n dict0, dict1 = {}, {}\n for k in range(n_exceptions):\n i0, i1, qq, sigma, epsilon = force0.getExceptionParameters(k)\n i0, i1 = reorder_bonds(i0, i1)\n dict0[i0, i1] = ((qq / unit_qq, sigma / unit_sigma, epsilon / unit_epsilon))\n\n i0, i1, qq, sigma, epsilon = force1.getExceptionParameters(k)\n i0, i1 = reorder_bonds(i0, i1)\n dict1[i0, i1] = ((qq / unit_qq, sigma / unit_sigma, epsilon / unit_epsilon))\n\n keys0 = set(dict0.keys())\n keys1 = set(dict1.keys())\n logger.info(\"Exceptions0 - Exceptions1 = %s\" % (keys0.difference(keys1)))\n logger.info(\"Exceptions1 - Exceptions0 = %s\" % (keys1.difference(keys0)))\n assert set(dict0.keys()) == set(dict1.keys()), \"Systems have different NonBondedForce Exceptions\"\n\n for k, parameter_name in enumerate([\"qq\", \"sigma\", \"epsilon\"]):\n for (i0, i1) in dict0.keys():\n val0 = dict0[i0, i1][k]\n val1 = dict1[i0, i1][k]\n if parameter_name == \"sigma\" and dict0[i0, i1][2] == 0.0 and dict1[i0, i1][2] == 0.0:\n continue # If both epsilon parameters are zero, then sigma doesn't matter so skip the comparison.\n if parameter_name ==\"sigma\":\n assert compare(val0, val1), \"Error: NonBondedForce Exception, atom (%d, %d) has sigma values of %f and %f angstroms, respectively.\" % (i0, i1, parameter_name, val0, val1)\n elif parameter_name==\"qq\":\n assert compare(val0, val1), \"Error: NonBondedForce Exception atom (%d, %d) has squared charge values of %f and %f (elementary charge)**2, respectively.\" % (i0, i1, val0, val1)\n else:\n assert compare(val0, val1), \"Error: NonBondedForce Exception, atom (%d, %d) has epsilon values of %f and %f kJ/mol, respectively.\" % (i0, i1, val0, val1)", "def collide(b1, b2):\n dx = b1.x - b2.x\n dy = b1.y - b2.y\n\n distance = math.hypot(dx, dy)\n \n if distance < b1.size + b2.size: # If they have collided\n tangent = math.atan2(dy, dx) # Find the tangent of the point\n angle = 0.5 * math.pi + tangent # We use this later on\n b1.angle = 2*tangent - b1.angle # Alter angles\n b2.angle = 2*tangent - b2.angle\n (b1.speed, b2.speed) = (b2.speed, b1.speed) # Swap speeds\n b1.speed *= elasticity # Reduce speed due to elasticity\n b2.speed *= elasticity\n\n b1.x += math.sin(angle) # Move particles away from each other\n b1.y -= math.cos(angle)\n b2.x -= math.sin(angle)\n b2.y += math.cos(angle)", "def _is_hbond(donor, donor_h, acceptor, box, cutoff_dist, cutoff_angle):\n cutoff_angle_rad = np.deg2rad(cutoff_angle)\n theta = angle(donor, donor_h, acceptor, box=box)\n dist = distance(donor_h, acceptor, box=box)\n return (theta > cutoff_angle_rad) & (dist <= cutoff_dist)", "def check_overlap(self, a, b):\n return utils.is_point_in_circle(b.get_pos(), a.get_pos(), a.radius)", "def bond_check(bond_distance,bond_min=0,bond_max=1.5): # we can define the default min and max in the def\n if bond_distance >bond_min and bond_distance<bond_max:\n return True\n else:\n return False", "def hasCollidedWith(self,otherEntity):\n 
distance=math.sqrt((otherEntity.xPos-self.xPos)**2+(otherEntity.yPos-self.yPos)**2)\n return distance < (self.hitboxRadius+otherEntity.hitboxRadius)", "def _collide(self):\n\n collisions = self._get_collisions()\n for collision in collisions:\n self._update_excitation(collision)\n atom1 = self.atoms[collision[0]]\n atom2 = self.atoms[collision[1]]\n\n r = atom1.pos-atom2.pos\n r_mag = np.linalg.norm(r)\n r_hat = r/r_mag\n\n v_1_r = np.dot(atom1.vel, r_hat)\n v_2_r = np.dot(atom2.vel, r_hat)\n\n v_1_r_f = (atom1.mass-atom2.mass)*v_1_r/(atom1.mass + atom2.mass)\\\n + 2*atom2.mass*v_2_r/(atom1.mass + atom2.mass)\n v_2_r_f = (atom2.mass-atom1.mass)*v_2_r/(atom1.mass + atom2.mass)\\\n + 2*atom1.mass*v_1_r/(atom1.mass + atom2.mass)\n\n delta_v_1 = (v_1_r_f - v_1_r)*r_hat\n delta_v_2 = (v_2_r_f - v_2_r)*r_hat\n\n self.atoms[collision[0]].vel += delta_v_1\n self.atoms[collision[1]].vel += delta_v_2", "def collision( ball1, ball2 ):\n\t\t\n\t#equation from wikipedia\n\ta1 = 2 * float(ball2.mass / (ball1.mass + ball2.mass))\t\t\t\t\t\t\t\t# 2 * m2 / ( m1 + m2 ) \n\ta2 = 2 - a1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 2 * m1 / ( m1 + m2 ) = 2 - m2 / ( m1 + m2 ) \n\tb = (ball1.velocity - ball2.velocity) * (ball1.position - ball2.position)\t\t\t# < v1 - v2, x1 - x2 > = < v2 - v1, x2 - x1 >\n\tc = (ball1.position - ball2.position).norm() \t\t\t\t\t\t\t\t\t\t# || x1 - x2 || ^ 2\t= || x2 - x1 || ^ 2\t\n\tif c == 0:\n\t\tc = 0.01\t\t\t\t\t\t\n\td = b / c\n\n\t#enter new velocites\n\tball1.velocity = ball1.velocity - (ball1.position - ball2.position) * a1 * d\n\tball2.velocity = ball2.velocity - (ball2.position - ball1.position) * a2 * d\n\n\t#changing color \n\tball1.color = ball2.color = ( \t(ball1.color[0] + ball2.color[0]) * 0.5, \n\t\t\t\t\t\t\t\t\t(ball1.color[1] + ball2.color[1]) * 0.5, \n\t\t\t\t\t\t\t\t\t(ball1.color[2] + ball2.color[2]) * 0.5\t\t)", "def check_wall_collision(self):\r\n if self.head.xcor() > 280 or self.head.xcor() < -280 or \\\r\n self.head.ycor() > 280 or self.head.ycor() < -280:\r\n return False\r\n else:\r\n return True", "def collide_hit_rect(first, second):\n return first.hit_rect.colliderect(second.hit_rect) or first.hit_rect.colliderect(second.hit_rect)", "def reached_final_point():\n return all(point.constraints[b.atom_indexes] == b.final_dist\n for b in self.bonds)", "def intersects(self, other): # -> bool:\n ...", "def _check_for_collision(self, sample):\n collide=False\n for i in range(len(self.obstacles)):\n collide=collide or self.obstacles[i].in_collision(sample)\n return collide", "def chain_reaction( b1, b2):\n if b1.color == color.red or b2.color == color.red:\n if mag(b1.pos-b2.pos) < b1.radius + b2.radius:\n return True\n\n return False", "def collide(self, x, y):\n return self._rect.collidepoint(x, y)", "def check_collision(self, footprint):\n return self.upperleft[0] < footprint.upperleft[0] < footprint.upperright[0] < self.upperright[0] and \\\n self.upperleft[1] < footprint.upperleft[1] < footprint.bottomleft[1] < self.bottomleft[1]", "def can_overlap(self):\n return False", "def is_in_collision_with_other_molecule(self, molecule):\n for m in self.molecules:\n if molecule.is_in_collision(m):\n return m\n return None", "def check_collision(self, p1xy, p2xy):\n p1rc = xy2rc(p1xy)\n p2rc = xy2rc(p2xy)\n rr, cc = line(int(p1rc[0]), int(p1rc[1]), int(p2rc[0]), int(p2rc[1]))\n line_coords_rc = np.vstack([rr, cc]).T\n for line_coord_rc in line_coords_rc:\n if array_in_list(line_coord_rc, list(self.obstacles)):\n return True\n return False", "def is_conjugate(self):\n 
return self.is_dagger and bool(self.z)", "def handle_collide_with_borders(self, surface_size):\n if self.position[0] < Molecule.radius or self.position[0] > surface_size[0] - Molecule.radius:\n self.speed[0] = -self.speed[0]\n return True\n if self.position[1] < Molecule.radius or self.position[1] > surface_size[1] - Molecule.radius:\n self.speed[1] = -self.speed[1]\n return True\n return False", "def is_spare(self):\n if self.is_strike():\n return False\n\n return (self.first_ball + self.second_ball) == 10", "def collides(pos1, rad1, pos2, rad2):\n x1 = pos1[0]\n y1 = pos1[1]\n\n x2 = pos2[0]\n y2 = pos2[1]\n\n # Right edge is over the left edge and\n # Left edge is not over the right edge\n right_horizontal_colliding = (\n ((x1 + rad1) > (x2 - rad2)) and ((x1 - rad1) < (x2 + rad2))\n )\n left_horizontal_colliding = (\n ((x1 - rad1) < (x2 + rad2)) and ((x1 + rad1) > (x2 - rad2))\n )\n vertical_colliding = (\n ((y1 + rad1) > (y2 - rad2)) and ((y1 - rad1) < (y2 + rad2))\n )\n\n horizontal_colliding = \\\n right_horizontal_colliding or left_horizontal_colliding\n\n return horizontal_colliding and vertical_colliding", "def check_collision(self, x_3x1, avoidance_radius):\n if len(self.obstacles) > 1:\n for obs_point in self.obstacles[:]:\n dist = np.linalg.norm(obs_point - x_3x1)\n if dist < avoidance_radius:\n print \"dist: \" + str(dist)\n # a collision was found within the avoidance radius\n return True\n return False", "def bbox_collision(bbox1, bbox2):\n\n bbox1 = np.asarray(bbox1)\n bbox2 = np.asarray(bbox2)\n\n max1 = np.max(bbox1, axis=1)\n min1 = np.min(bbox1, axis=1)\n\n max2 = np.max(bbox2, axis=1)\n min2 = np.min(bbox2, axis=1)\n\n out = (min1 <= max2) & (max1 >= min2)\n return np.all(out)", "def collide(self):\n dist = distance.cdist(self.object_position, self.object_position, \"euclidean\")\n collision = ((dist - self.object_radius) <= 0) * 1\n np.fill_diagonal(collision, 0)\n collision = np.sum(collision, axis=1)\n print(dist)\n print(collision)\n return collision", "def is_collision_by_map_obstacle(self):\n for content in self.contents:\n if self.content.y == self.y and self.content.x == self.x:\n return True\n else:\n return False", "def check_collide(self, card_):\n if len(self.cards) > 0:\n return self.cards[-1].check_collide(card_=card_)\n else:\n return card_.check_collide(pos=self.pos)", "def is_close_to_collision(self, od_bbox=None, min_dists=None, *, return_min_dist=False, thres=D2C_THRESHOLD):\n\t\tif od_bbox is None or min_dists is None:\n\t\t\tgen = self.loop(od_bbox=True, dist_to_col=True)\n\t\t\tod_bbox, dist_to_col = next(gen)\n\n\t\tmin_dists = min_dists[self._boxes_in_window(od_bbox)]\n\n\t\tmin_dist = min_dists[np.isfinite(min_dists)].min()\n\t\tis_close = min_dist < D2C_THRESHOLD\n\n\t\tif return_min_dist:\n\t\t\treturn is_close, min_dist\n\t\telse:\n\t\t\treturn is_close", "def bat(j1, j2):\n return (j1 == CISEAUX and j2 == FEUILLE) or\\\n (j1 == FEUILLE and j2 == PIERRE) or\\\n (j1 == PIERRE and j2 == CISEAUX)", "def bounce_collision(self, otherball):\r\n # relative positions\r\n dx = self.unif[0] - otherball.unif[0]\r\n dy = self.unif[1] - otherball.unif[1]\r\n rd = self.radius + otherball.radius\r\n # check sign of a.b to see if converging\r\n dotP = dot([dx, dy, 0.0],\r\n [self.vx - otherball.vx, self.vy - otherball.vy, 0.0])\r\n if dx * dx + dy * dy <= rd * rd and dotP < 0:\r\n R = otherball.mass / self.mass #ratio of masses\r\n \"\"\"Glancing angle for equating angular momentum before and after collision.\r\n Three more simultaneous equations for x and 
y components of momentum and\r\n kinetic energy give:\r\n \"\"\"\r\n if dy:\r\n D = dx / dy\r\n delta2y = 2 * (D * self.vx + self.vy -\r\n D * otherball.vx - otherball.vy) / (\r\n (1 + D * D) * (R + 1))\r\n delta2x = D * delta2y\r\n delta1y = -1 * R * delta2y\r\n delta1x = -1 * R * D * delta2y\r\n elif dx:\r\n # Same code as above with x and y reversed.\r\n D = dy / dx\r\n delta2x = 2 * (D * self.vy + self.vx -\r\n D * otherball.vy - otherball.vx) / (\r\n (1 + D * D) * (R + 1))\r\n delta2y = D * delta2x\r\n delta1x = -1 * R * delta2x\r\n delta1y = -1 * R * D * delta2x\r\n else:\r\n delta1x = delta1y = delta2x = delta2y = 0\r\n\r\n self.vx += delta1x\r\n self.vy += delta1y\r\n otherball.vx += delta2x\r\n otherball.vy += delta2y", "def carsAreClose(car1: Car, car2: Car):\n op = overlapPercent(car1.box, car2.box)\n if op > 0.3:\n return True\n else:\n return False", "def isCollided(birdX, birdY, upper_pipes, lower_pipes, groundY):\n birdHeight = SPRITES['bird'].get_height()\n # Getting the lower left coordinate instead of upper left coordinate\n if birdY + birdHeight > groundY or birdY < 0: # if the player touches the top of the screen or falls on the ground\n return True\n\n for u_pipe, l_pipe in zip(upper_pipes, lower_pipes):\n # Both pipes have same dimensions\n pipeHeight = SPRITES['pipe_inverted'].get_height()\n pipeWidth = SPRITES['pipe_inverted'].get_width()\n\n if (birdY < u_pipe['y'] + pipeHeight) and (abs(birdX - u_pipe['x']) < pipeWidth):\n return True\n\n # As the birdY is measure from top left corner\n if (birdY + SPRITES['bird'].get_height() > l_pipe['y']) and (abs(birdX - l_pipe['x']) < pipeWidth):\n return True\n\n return False", "def collide(self, p1, p2):\n distance = p1.pos.distance(p2.pos) # distance between to particles\n if distance.length() < (p1.size + p2.size):\n pass", "def is_on(a, b, c):\r\n return(isCollinear(a, b, c) and (within(a[0], c[0], b[0]) if a[0] != b[0] else\r\n within(a[1], c[1], b[1])))", "def ball_collision_update(self):\r\n ball_pairs = self.balls_colliding()\r\n for ball_pair in ball_pairs:\r\n b1,b2 = ball_pair\r\n self.ball_pair_collision_update(b1,b2)", "def hit(self, otherball):\r\n dx = (self.unif[0] + self.vx) - (otherball.unif[0] + otherball.vx)\r\n dy = (self.unif[1] + self.vy) - (otherball.unif[1] + otherball.vy)\r\n rd = self.radius + otherball.radius\r\n return dot(dx, dy) < (rd * rd)", "def is_bijective(self):\n return self.is_injective() and self.is_surjective()", "def check_collision(self):\n self.collided = False\n\n for point in self.collision_points:\n\n try:\n if self.game_map.get_at((\n int(point[0]), int(point[1])\n )) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True", "def obstacle_between(self, node1, node2, agent):\n if self.obstacles[agent] is None:\n return False\n if self.is_inside(node1, self.obstacles, agent)[0] or self.is_inside(node2, self.obstacles, agent)[0]:\n return True\n\n for cords in self.xy_cords:\n x1 = node1.state[cords[0]]\n y1 = node1.state[cords[1]]\n x2 = node2.state[cords[0]]\n y2 = node2.state[cords[1]]\n p1 = Point(x1, y1)\n q1 = Point(x2, y2)\n for obstacle in self.obstacles[agent]:\n x_min = obstacle[0][0]\n x_max = obstacle[0][1]\n y_min = obstacle[1][0]\n y_max = obstacle[1][1]\n p2 = Point(x_min, y_min)\n q2 = Point(x_min, y_max)\n if doIntersect(p1, q1, p2, q2):\n return True\n p2 = Point(x_min, y_max)\n q2 = Point(x_max, y_max)\n if doIntersect(p1, q1, p2, q2):\n return True\n p2 = Point(x_max, y_max)\n q2 = Point(x_max, y_min)\n if doIntersect(p1, q1, p2, q2):\n 
return True\n p2 = Point(x_max, y_min)\n q2 = Point(x_min, y_min)\n if doIntersect(p1, q1, p2, q2):\n return True\n return False", "def compatible(self, other: 'Reaction') -> bool:\n return self.lhs.compatible(other.lhs) and self.rhs.compatible(other.rhs)", "def cylinder_collision_detection(\n point_a1, point_a2, radius_a, point_b1, point_b2, radius_b, bbox_a=None, bbox_b=None\n):\n\n if bbox_a is None:\n bbox_a = get_bbox([point_a1, point_a2], margin=radius_a)\n if bbox_b is None:\n bbox_b = get_bbox([point_b1, point_b2], margin=radius_b)", "def bornoff(self, board):\n res = False\n if (self.player):\n if (reduce(lambda x, y: x+y, board.p1vec) < reduce(lambda x, y: x+y, self.board.p1vec)):\n res = True\n else:\n if (reduce(lambda x, y: x+y, board.p2vec) < reduce(lambda x, y: x+y, self.board.p2vec)):\n res = True\n return res", "def check_if_equal(self, other):\n if self.get_time_left() == other.get_time_left():\n if len(self.balls) == len(other.balls) and len(self.hexagons) == len(other.hexagons):\n for player in self.players:\n does_have_equal_player = False\n for other_player in other.players:\n if player == other_player:\n does_have_equal_player = True\n break\n if not does_have_equal_player:\n return False\n for bubble in self.balls:\n does_have_equal_bubble = False\n for other_bubble in other.balls:\n if bubble == other_bubble:\n does_have_equal_bubble = True\n break\n if not does_have_equal_bubble:\n return False\n for bubble in self.hexagons:\n does_have_equal_bubble = False\n for other_bubble in other.hexagons:\n if bubble == other_bubble:\n does_have_equal_bubble = True\n break\n if not does_have_equal_bubble:\n return False\n return True\n return False", "def corral_collide(ball):\r\n\r\n # If the ball hits wallA\r\n if ball.pos.z < wallA.pos.z: # Hit -- check for z\r\n ball.pos.z = wallA.pos.z # Bring back into bounds\r\n ball.vel.z *= -1.0 # Reverse the z velocity\r\n\r\n # If the ball hits wallB\r\n if ball.pos.x < wallB.pos.x: # Hit -- check for x\r\n ball.pos.x = wallB.pos.x # Bring back into bounds\r\n ball.vel.x *= -1.0 # Reverse the x velocity\r\n \r\n # If the ball hits wallC\r\n if ball.pos.z > wallC.pos.z: # Hit -- check for x\r\n ball.pos.z = wallC.pos.z # Bring back into bounds\r\n ball.vel.z *= -1.0 # Reverse the x velocity\r\n \r\n # If the ball hits wallD\r\n if ball.pos.x > wallD.pos.x: #Hit -- check for z\r\n ball.pos.x = wallD.pos.x # Bring back into bounds\r\n ball.vel.x *= -1.0 #Reverse the z velocity", "def is_conjugated(self):\n\n return np.array([bond.is_conjugated for bond in self])", "def willcollide(self, p, c, r, v=None):\n return (p.step(dt).vec(c)).len() > r", "def collision(self, direction):\n if direction == \"north\":\n # valide north cell\n if self.check_coverage_collision(self.curr_cell.row - 1,\n self.curr_cell.col):\n return True\n\n elif direction == \"south\":\n # valide north cell\n if self.check_coverage_collision(self.curr_cell.row + 1,\n self.curr_cell.col):\n return True\n\n elif direction == \"east\":\n # valide north cell\n if self.check_coverage_collision(self.curr_cell.row,\n self.curr_cell.col + 1):\n return True\n\n elif direction == \"west\":\n # valide north cell\n if self.check_coverage_collision(self.curr_cell.row,\n self.curr_cell.col - 1):\n return True\n\n return False", "def isCrossingCircle(self, other):\n vector = Vector.createFromTwoPoints(self.center, other.center)\n return vector.norm < self.radius + other.radius", "def collides(a, b):\n # Current locations.\n xa = a.x\n xb = b.x\n ya = a.y\n yb = b.y\n\n # Treat b 
as a point, we only need one radius.\n try:\n radius = a.radius + b.radius\n except AttributeError:\n radius = getattr(a, 'radius', 0.5) + getattr(b, 'radius', 0.5)\n\n # Previous frame locations.\n try: pxa = a.px\n except KeyError: pxa = xa\n try: pya = a.py\n except KeyError: pya = ya\n try: pxb = b.px\n except KeyError: pxb = xb\n try: pyb = b.py\n except KeyError: pyb = yb\n\n # Translate b's final position to be relative to a's start.\n # And now, circle/line collision.\n dir_x = pxa + (xb - xa) - pxb\n dir_y = pya + (yb - ya) - pyb\n\n diff_x = pxa - pxb\n diff_y = pya - pyb\n if (dir_x < 0.0001 and dir_x > -0.0001\n and dir_y < 0.0001 and dir_y > -0.0001):\n # b did not move relative to a, so do point/circle.\n return diff_x * diff_x + diff_y * diff_y < radius * radius\n\n # dot(diff, dir) / dot(dir, dir)\n t = (diff_x * dir_x + diff_y * dir_y) / (dir_x * dir_x + dir_y * dir_y)\n if t < 0:\n t = 0\n elif t > 1:\n t = 1\n\n dist_x = pxa - (pxb + dir_x * t)\n dist_y = pya - (pyb + dir_y * t)\n\n # dist_sq < radius_sq\n return dist_x * dist_x + dist_y * dist_y <= radius * radius", "def __hit_bricks(self, g_object):\n return type(g_object) == GRect and g_object != self.__paddle", "def _intersects_1D(A, B):\n return False if (B[1] <= A[0]) or (B[0] >= A[1]) else True", "def checkSpikeBonding (self):\r\n stable = True # If any bonds break this will be set to false\r\n stabilityChecker = True # Checks the result of each function call, if set to false then stable will be set to false\r\n # Go through each atom\r\n for i in range(len(self.mol)):\r\n # Go through each spike\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == True:\r\n stabilityChecker = self.stabilitySpike(self.mol[i].spikeArray[j])\r\n if stabilityChecker == False:\r\n stable = False\r\n #print (stable)\r\n if stable == True:\r\n print(\"No Bonds have broken \\n\")\r\n else:\r\n print (\"Bonds have broken \\n\")\r\n return stable", "def collision(self, model, new_location):\n\n within_bounds = all(model.boundaries[0] <= new_location) and all(new_location <= model.boundaries[1])\n\n if not within_bounds:\n\n collide = True\n\n elif self.neighbourhood(model, new_location):\n\n collide = True\n\n else:\n\n collide = False\n\n return collide", "def is_overlap(bb1, bb2):\n l1, t1, r1, b1 = bb1['x'], bb1['y'], bb1['x']+bb1['w'], bb1['y']+bb1['h']\n l2, t2, r2, b2 = bb2['x'], bb2['y'], bb2['x']+bb2['w'], bb2['y']+bb2['h']\n\n if r1 > l2 and r2 > l1 and b2 > t1 and b1 > t2:\n return True\n else:\n return False", "def is_in_collision_line(self, a, b):\n return abs((b[0]-a[0])*self.x + (a[1]-b[1])*self.y + (a[0]-b[0])*b[1] + (b[1]-a[1])*a[0]) /\\\n sqrt((b[0]-b[1])**2 + (a[1]-b[1])**2 + 0.0000001)< self.r", "def is_bound(pos1, el1, pos2, el2):\n threshold = 0.1\n if el1 == 'H' or el2 == 'H':\n threshold = 0.2\n if np.linalg.norm(np.array(pos1) - np.array(pos2)) < covalence_radius[el1] + covalence_radius[el2] + threshold:\n return True\n return False", "def check_obstructed(r1,r2): \n \n if r1==r2:\n return False\n \n #Densely sample line connecting r1 and r2.\n #If any of those sampled points is inside the rectangle, then the \n #line of sight intersects the rectangle and the tower's view is\n #obstructed.\n NP = 1000\n sampled_x = np.linspace(r1[0],r2[0],NP)\n sampled_y = np.linspace(r1[1],r2[1],NP)\n for x,y,w,h in self.coordinates__obstacles:\n for pt in xrange(NP):\n if (sampled_x[pt] > x) and (sampled_x[pt] < x+w) and \\\n (sampled_y[pt] > y) and (sampled_y[pt] < y+h):\n return True\n 
return False", "def is_collision_at(self, x, y):\n return self._on_post(x, y)", "def cell_is_blocked(self, y, x, map_data):\n symbol = map_data[y][x]\n # collision: obstacle, bridge, mirror (all types), anti-tank (all types)\n if symbol == self.OBSTACLE_SYMBOL or symbol == self.BRIDGE_SYMBOL or symbol == self.BRICK_SYMBOL or \\\n symbol == self.MIRROR_UL_SYMBOL or symbol == self.MIRROR_UR_SYMBOL or \\\n symbol == self.MIRROR_DL_SYMBOL or symbol == self.MIRROR_DR_SYMBOL or \\\n symbol == self.ANTI_TANK_UP_SYMBOL or symbol == self.ANTI_TANK_DOWN_SYMBOL or \\\n symbol == self.ANTI_TANK_LEFT_SYMBOL or symbol == self.ANTI_TANK_RIGHT_SYMBOL or \\\n symbol == self.ANTI_TANK_DESTROYED_SYMBOL:\n return True\n return False", "def is_approaching(self, other_particle):\n if self.pos_x < other_particle.pos_x:\n d_v_x = self.velocity_x - other_particle.velocity_x\n else:\n d_v_x = other_particle.velocity_x - self.velocity_x\n\n if self.pos_y < other_particle.pos_y:\n d_v_y = self.velocity_y - other_particle.velocity_y\n else:\n d_v_y = other_particle.velocity_y - self.velocity_y\n\n return d_v_x > 0 or d_v_y > 0", "def _check_collisions(self, link_pose_mat, avoidance_radius):\n for link_pose in link_pose_mat:\n # only use x,y,z from link pose\n x_3x1 = np.array((link_pose[0, 0], link_pose[0, 1], link_pose[0, 2]))\n if self.check_collision(x_3x1, avoidance_radius):\n return True\n return False", "def is_corridor_cell(cell: Cell) -> bool:\n open_walls = list(cell.open_walls)\n return len(open_walls) == 2 and open_walls[0].opposite == open_walls[1]", "def wall_collision(self):\n border_box_pos_1 = self.box_1.x - self.box_1.width/2\n\n if (border_box_pos_1) <= 0:\n return True\n else:\n return False", "def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False", "def collide(k, steer, b, pnjs):\r\n\tglobal translation\r\n\tdirection, sens = translation[steer]\r\n\r\n\tx0, y0 = Background.getOrigin(b)\r\n\r\n\tx1 = k[\"x\"] + int(direction==\"x\")*sens\r\n\ty1 = k[\"y\"] + int(direction==\"y\")*sens\r\n\r\n\r\n\tif x1 < 0 or x1 >= b[\"COLS\"] or y1 < 0 or y1 >= b[\"ROWS\"]:\r\n\t\treturn True # Hors-écran\r\n\r\n\tfor pnj in pnjs:\r\n\t\tpx, py = PNJ.getPos(pnjs[pnj])\r\n\t\tif x1+x0 == px and y1+y0 == py:\r\n\t\t\treturn True # Collision avec un pnj\r\n\r\n\tncase = Background.getCase(b, x0+x1, y0+y1)\r\n\r\n\tif not ncase[\"crossable\"]:\r\n\t\treturn True # Case dure\r\n\r\n\treturn False", "def _bbox_overlap(self, other):\n reg0 = self.bbox\n reg1 = other.bbox\n return (reg0[0] <= reg1[2] and reg1[0] <= reg0[2] and\n reg0[1] <= reg1[3] and reg1[1] <= reg0[3])" ]
[ "0.6964965", "0.6896832", "0.67123353", "0.6666151", "0.6524003", "0.6504052", "0.6484017", "0.6371618", "0.63714135", "0.62512064", "0.6232055", "0.62242526", "0.62045306", "0.6202152", "0.6201165", "0.6185181", "0.61452067", "0.6140926", "0.6111029", "0.60970056", "0.6095085", "0.6046574", "0.60040766", "0.5955441", "0.59438086", "0.5938511", "0.59355223", "0.5932986", "0.5928103", "0.589011", "0.58899754", "0.58775276", "0.58664256", "0.5849968", "0.5849715", "0.58265805", "0.5799986", "0.57859063", "0.5772843", "0.57415754", "0.57409525", "0.57389843", "0.5735567", "0.57297397", "0.5723642", "0.5713924", "0.5713122", "0.5707466", "0.5696045", "0.56729174", "0.5672055", "0.56663185", "0.56626475", "0.5650789", "0.5646787", "0.56411266", "0.56358224", "0.56284565", "0.56117666", "0.5604144", "0.56037104", "0.5597411", "0.55875474", "0.55813617", "0.5572227", "0.55457944", "0.5544463", "0.5543737", "0.5538512", "0.5530158", "0.55228686", "0.5521949", "0.5518081", "0.55024743", "0.54986036", "0.54966956", "0.54965514", "0.5492332", "0.54815984", "0.54778516", "0.54749787", "0.54739803", "0.5473186", "0.54709774", "0.54705733", "0.54685915", "0.5457423", "0.5449096", "0.54458785", "0.5443556", "0.54422206", "0.54335463", "0.5429134", "0.5426427", "0.54260683", "0.54238975", "0.54224145", "0.5413893", "0.54090965", "0.5406738" ]
0.65765357
4
Get the size of the angle formed by two bonds which share a common atom.
def __get_angle(self, names, vecA, vecB):
    pivot = max(names, key=names.count)

    if names[0] != pivot:  # Atoms need to be ordered to pick vectors correctly
        vecA = vecA * -1
    if names[2] != pivot:
        vecB = vecB * -1

    radians = vecA.AngleTo(vecB)
    angle = 180 / math.pi * radians

    return angle
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_length(self):\n return AtomMath.length(self.atom1.position - self.atom2.position)", "def _bond_dist(geom, a1, a2):\n if isinstance(geom, np.ndarray):\n geom = geom.flatten().tolist()\n a13 = a1 * 3\n a23 = a2 * 3\n\n xd = (geom[a13] - geom[a23])**2\n yd = (geom[a13 + 1] - geom[a23 + 1])**2\n zd = (geom[a13 + 2] - geom[a23 + 2])**2\n\n return (xd + yd + zd)**0.5", "def distance(cls, atom_1, atom_2):\n\t\t\n\t\treturn np.linalg.norm((atom_1-atom_2).atom_loc)", "def length(a, b):\n return sqrt((a[0] - b[0]) * (a[0] - b[0]) + (a[1] - b[1]) * (a[1] - b[1]))", "def cable_length(self):\n skel = self.physical_space(copy=False)\n\n v1 = skel.vertices[skel.edges[:,0]]\n v2 = skel.vertices[skel.edges[:,1]]\n\n delta = (v2 - v1)\n delta *= delta\n dist = np.sum(delta, axis=1)\n dist = np.sqrt(dist)\n\n return np.sum(dist)", "def length(self):\n return math.sqrt(\n (self.endpoint_a.northing - self.endpoint_b.northing) ** 2 +\n (self.endpoint_a.easting - self.endpoint_b.easting) ** 2\n )", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))", "def Agl2ArcLen(self,agl):\r\n\r\n return (self.distance_between_wheels/2)*agl", "def dist(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return length(sub(first,other))", "def calculate_angle(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.atan2(dy, dx) * 180.0 / math.pi", "def total_edge_length(e1, e2):\n return cumulative_edge_length(e1) + cumulative_edge_length(e2)", "def ball_dist(b1, b2):\n return math.sqrt((b1.x - b2.x) ** 2 + (b1.y - b2.y) ** 2)", "def distance_to(self, other):\n ox, oy = other\n return math.hypot(self[0] - ox, self[1] - oy)", "def __len__(self):\n return len(self.qc_mol.atoms) + len(self.br_mol.atoms) + len(self.pc_mol.atoms)", "def __len__(self):\n from math import sqrt\n\n #nicer notation to make it easier to read.\n\n a, b = self.x, self.y\n\n return int(sqrt(a**2 + b**2))", "def dist(gene1, gene2):\n return abs(len(gene1.goal) - len(gene2.goal))", "def _geodesic_distance(mesh, face1, face2, edge):\n edge_center = (mesh.vertices[edge[0]] + mesh.vertices[edge[1]]) / 2\n return _list_length(_list_minus(edge_center, _face_center(mesh, face1))) + \\\n _list_length(_list_minus(edge_center, _face_center(mesh, face2)))", "def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)", "def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance", "def get_length(self) -> np.float64:\n\n return np.float64(\n sqrt(\n (self.node1.x - self.node2.x) ** 2\n + (self.node1.y - self.node2.y) ** 2\n )\n )", "def calculate_distance(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.sqrt(dy * dy + dx * dx)", "def shared_element_size(self):\n norm = self.shared_element_norm\n return 0.5 * np.linalg.norm(norm, axis=1)", "def _calc_ap(self, mol):\n matches = mol.GetSubstructMatches(self.aromatic_query)\n return len(matches) / mol.GetNumAtoms()", "def get_angle_rad_between_joints(joint_a: Joint2D, joint_b: Joint2D) -> float:\n return math.atan2(joint_a.y - joint_b.y, joint_a.x - joint_b.x)", "def lenght(self):\n from math import sqrt\n\n #nicer notation to make it easier to read.\n\n a, b = self.x, self.y\n\n return sqrt(a**2 + 
b**2)", "def distance_between(alat, alon, blat, blon):\n alat, alon = radians(90-alat), radians(alon)\n blat, blon = radians(90-blat), radians(blon)\n return 6373*acos(sin(alat)*sin(blat)*cos(alon-blon) + cos(alat)*cos(blat))", "def get_angle_between(self, other):\n cross = self.x*other[1] - self.y*other[0]\n dot = self.x*other[0] + self.y*other[1]\n return math.atan2(cross, dot)", "def dist(self, one, two):\n return np.sqrt((one[0] - two[0]) ** 2 + (one[1] - two[1]) ** 2)", "def angle(self, other):\n return acosd(self.normalized().dot(other.normalized()))", "def add_edge_length(self, a, b):\n return tuple(sum(x) for x in zip(a, b))", "def chord_dist(n1, n2):\n return min(((n2.node_id - n1.node_id) % (2 ** config.ring_size_bits)),\n ((n1.node_id - n2.node_id) % (2 ** config.ring_size_bits)),\n ) / float(2 ** config.ring_size_bits)", "def arc_length(angle_1, angle_2, r=1.):\n # Compute the angle between the two inputs between 0 and 2*pi.\n theta = np.mod(angle_2 - angle_1, 2*pi)\n if theta > pi:\n theta = theta - 2 * pi\n # Return the length of the arc\n L = r * np.abs(theta)\n return(L)", "def distance(mass_1: ObjectMass, mass_2: ObjectMass) -> int:\n\n # collect orbit hops\n orbits_1 = mass_1.get_orbit_hops()\n\n orbits_2 = mass_2.get_orbit_hops()\n\n # find common orbit hop with least amount of hops\n common_hops: set = orbits_1.keys() & orbits_2.keys()\n\n hop = common_hops.pop()\n smallest_total_hops = orbits_1[hop] + orbits_2[hop]\n for hop in common_hops:\n total_hops = orbits_1[hop] + orbits_2[hop]\n\n if total_hops < smallest_total_hops:\n smallest_total_hops = total_hops\n\n return smallest_total_hops", "def get_diameter(self) -> float:\r\n \r\n return (self.box[3] - self.box[1] + self.box[2] - self.box[0]) / 2", "def compute_size(self):\n length = np.max(np.max(self.positions, axis=1) -\n np.min(self.positions, axis=1))\n return length + 2*self.get_radii().max()", "def calculate_alignment(boids: List[b.Boid]) -> Tuple[float, float]:\n align_x = 0\n align_y = 0\n for boid in boids:\n align_x += boid.v * math.cos(boid.direction)\n align_y += boid.v * math.sin(boid.direction)\n return align_x / len(boids), align_y / len(boids)", "def get_distance(self, star):\n if self == star:\n return 0\n\n a_car = self.get_cartesian_coords()\n b_car = star.get_cartesian_coords()\n dab = math.degrees(math.acos(a_car[0] * b_car[0] +\n a_car[1] * b_car[1] +\n a_car[2] * b_car[2]))\n return dab", "def calculate_distance(atom1,atom2): #dot string to show when you go into the help doc of this function\n x_distance = atom1[0]-atom2[0]\n y_distance = atom1[1]-atom2[1]\n z_distance = atom1[2]-atom2[2]\n distance = numpy.sqrt(x_distance**2+ y_distance**2+z_distance**2)\n return distance", "def _angle_between(self, point_1, point_2):\n angle_1 = math.atan2(point_1.y, point_1.x)\n angle_2 = math.atan2(point_2.y, point_2.x)\n return angles.shortest_angular_distance(angle_1, angle_2)", "def distance(a, b):\n ax, ay = a\n bx, by = b\n dx = bx - ax\n dy = by - ay\n return (abs(dx) + abs(dy) + abs(dx - dy)) / 2", "def dist(self, one, two):\n return sum((one[0] != two[0], one[1] != two[1]))", "def bond_dist_delta(ase_mol1, ase_mol2):\n #convert to molmod\n mol1 = to_molmod(ase_mol1)\n mol2 = to_molmod(ase_mol2)\n\n #get bond distances between neighbouring carbon atoms\n mol1_bdists_inds = bond_distances_v2(mol1)\n #seperate the bond distances and the atom indices the bonds correspond to\n #nb indexes are python_like so start at zero programs (e.g. 
pyMol/Avogadro) often number atoms starting at 1\n mol1_bdists, mol1_inds = zip(*mol1_bdists_inds)\n\n mol2_bdists_inds = bond_distances_v2(mol2, bonds=mol1_inds)\n mol2_bdists, mol2_inds = zip(*mol2_bdists_inds)\n\n if mol1_inds != mol2_inds:\n raise RuntimeError('Comparison of bond distances for different molecules not yet implemented')\n\n mol1_bdists = np.array(mol1_bdists)\n mol2_bdists = np.array(mol2_bdists)\n\n delta_bdists = mol1_bdists - mol2_bdists\n return np.array([mol1_inds, delta_bdists])", "def distance_pbc(cls, atom_1, atom_2):\n\t\tif atom_1.box_dim is None or atom_2.box_dim is None:\n\t\t\traise Exception(\"simulation box size has not been specified\")\n\t\tif atom_1.box_dim != atom_2.box_dim:\n\t\t\traise Exception(\"simulation box size does not match\")\n\t\t\n\t\t[lx,ly,lz] = [atom_2.box_dim[0],atom_2.box_dim[1],atom_2.box_dim[2]]\n\t\t\n\t\t_pair_list = np.array([[0,0,0],[lx,0,0],[-lx,0,0],[0,ly,0],[0,-ly,0],[0,0,lz],[0,0,-lz]])\n\t\t\n\t\t_pair_distance = []\n\t\t\n\t\tfor _pair in _pair_list:\n\t\t\t_curr_pair_distance = Atom.distance(atom_1, Atom((np.array(atom_2.atom_loc) + _pair).tolist()))\n\t\t\t_pair_distance.append(_curr_pair_distance)\n\t\treturn min(_pair_distance)", "def calc_distance(first: Waypoint, second: Waypoint) -> int:\n return int(distance.vincenty(first.coords(), second.coords()).m)", "def angle(a: Point, b: Point) -> int:\n ang = math.degrees(math.atan2(b.y - a.y, b.x - a.x)) + 90\n return ang + 360 if ang < 0 else ang", "def get_euclid_distance_to(self, atom):\n return linalg.norm(self.get_coords() - atom.get_coords())", "def two_point_length(tuple1, tuple2):\n tot_sum = 0\n tot_sum += pow(tuple1[0]-tuple2[0], 2)\n tot_sum += pow(tuple1[1]-tuple2[1], 2)\n tot_sum += pow(tuple1[2]-tuple2[2], 2)\n final_len = pow(tot_sum, 1/2)\n return final_len", "def collisionAngle(obj1, obj2):\n vec1 = obj1.vec\n vec2 = obj2.vec\n n1 = np.linalg.norm(vec1)\n n2 = np.linalg.norm(vec2)\n return abs(np.cross(vec1,vec2)/(n1*n2))", "def baselength(ant_ID1, ant_ID2):\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))", "def common_prefix_len(self, other: \"ProofPath\") -> int:\n if self.start() == other.start():\n return self.match_len(other, self.start())\n\n return 0", "def distanceTwoPoints(self,A,B):\n #productive\n # used by addNeedleToScene\n profprint()\n length = ( (A[0]-B[0])**2 + (A[1]-B[1])**2 + (A[2]-B[2])**2 ) ** 0.5\n return length", "def dist(self, other: Coordinate) -> int:\n return abs(other.x - self.x) + abs(other.y - self.y)", "def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre", "def calc_mainchain_bond_angle(self):\n aN = self.get_atom('N')\n aCA = self.get_atom('CA')\n aC = self.get_atom('C')\n aO = self.get_atom('O')\n aCB = self.get_atom('CB')\n\n naN = None\n naCA = None\n next_res = self.get_offset_residue(1)\n if next_res:\n naN = next_res.get_atom('N')\n naCA = next_res.get_atom('CA')\n\n N_CA_C = AtomMath.calc_angle(aN, aCA, aC)\n CA_C_O = AtomMath.calc_angle(aCA, aC, aO)\n N_CA_CB = AtomMath.calc_angle(aN, aCA, aCB)\n CB_CA_C = AtomMath.calc_angle(aCB, aCA, aC)\n CA_C_nN = AtomMath.calc_angle(aCA, aC, naN)\n C_nN_nCA = AtomMath.calc_angle(aC, naN, naCA)\n\n return (N_CA_C, N_CA_CB, CB_CA_C, CA_C_O, CA_C_nN, C_nN_nCA)", "def calMeasuredContactAngle(self):\n #account the base\n bottomLength = 0\n arrayHeight = np.empty([0, ], dtype = 'int64')\n for i in sp.arange(self.nx):\n if (self.densityFluid1[1, i] >= 0.485):\n bottomLength += 1\n #account the height\n for m in 
sp.arange(self.nx):\n tmpHeight = 0\n for n in sp.arange(1, self.ny - 1):\n if (self.densityFluid1[n, m] >= 0.485):\n tmpHeight += 1\n arrayHeight = np.append(arrayHeight, tmpHeight)\n heightH = np.amax(arrayHeight)\n #radius of droplet\n radiusD = (4. * np.power(heightH, 2.) + np.power(bottomLength, 2.)) / \\\n (8. * heightH)\n contactAngle = np.arctan((bottomLength) / (2. * (radiusD - heightH))) \n return contactAngle", "def dist(a, b):\n return np.sum((a-b)**2.0)**.5", "def size(self):\r\n return len(atoms)", "def size(self):\n height = 0\n width = 0\n\n mother = self.mother()\n father = self.father()\n\n for parent in self.parents():\n if not parent is None:\n pw, ph = parent.size()\n width += pw\n height += ph\n\n if width > 0:\n width += self._hmargin*2\n\n return width, height", "def distanceTwoPoints(self, A, B):\r\n # productive\r\n # used by addNeedleToScene\r\n if frequent: profprint()\r\n length = ((A[0] - B[0]) ** 2 + (A[1] - B[1]) ** 2 + (A[2] - B[2]) ** 2) ** 0.5\r\n return length", "def calc_angle(atom_R, env_R_arr1, env_R_arr2):\n assert atom_R not in env_R_arr1\n assert atom_R not in env_R_arr2\n atom_env1 = env_R_arr1 - atom_R # (num_env1, 3)\n atom_env2 = env_R_arr2 - atom_R # (num_env2, 3))\n norm1 = np.linalg.norm(atom_env1, axis=1)\n norm2 = np.linalg.norm(atom_env2, axis=1)\n cosine = atom_env1.dot(atom_env2.T) / np.outer(norm1, norm2)\n cosine[cosine > 1.0] = 1.0\n return np.arccos(cosine)", "def angle_similarity(l1, l2):\n return angle(l1, l2)", "def get_adjacency_distance(self, other):\n if self.size != other.size:\n raise ValueError(\"The permutations must be of the same size.\")\n self_adj_mat = self.get_adjacency_matrix()\n other_adj_mat = other.get_adjacency_matrix()\n n_adj = 0\n for i in xrange(self.size):\n for j in xrange(self.size):\n if i == j:\n continue\n if self_adj_mat[i, j] * other_adj_mat[i, j] == 1:\n n_adj += 1\n d = self.size - n_adj - 1\n return d", "def _histogram_intersection_distance(a, b):\n # branching version\n #return np.vstack((a, b)).min(axis=0).sum()\n\n # Non-branching version\n # noinspection PyUnresolvedReferences\n return (a + b - np.abs(a - b)).sum() * 0.5", "def total_length(self):\n # YOUR CODE HERE\n return abs(self.radius*self.angle)", "def bond(self):\r\n return self.A.shape[-1]", "def _angular_distance(mesh, face1, face2): # 其实是余弦距离\n angular_distance = (1 - _list_cos(face1.normal, face2.normal))\n if _list_multiplication(face1.normal, (_list_minus(_face_center(mesh, face2), _face_center(mesh, face1)))) < 0:\n # convex angles are not that bad so scale down distance a bit\n # 凸角不是那么糟糕,所以要把距离缩小一些。\n angular_distance *= eta\n return angular_distance", "def Length(self):\n xyza = self.ga_ref.get_position() + self.wa\n xyzb = self.gb_ref.get_position() + self.wb\n if self.gc is not None:\n xyzc = self.gc_ref.get_position() + self.wc\n xa, ya, za = xyza\n length = self._integrate(\n xyza - xa,\n xyzb - ya,\n xyzc - za,\n )\n else:\n length = np.linalg.norm(xyzb - xyza)\n return length", "def __len__(self):\n \n return len(self.num_atoms)", "def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)", "def angle(self) -> int:", "def __len__(self):\n return max(self.A_size, self.B_size)", "def length_dist(self,synset_1, synset_2):\n\t l_dist = sys.maxsize\n\t if synset_1 is None or synset_2 is None: \n\t return 0.0\n\t if synset_1 == synset_2:\n\t # if synset_1 and synset_2 are the same synset return 0\n\t l_dist = 0.0\n\t else:\n\t wset_1 = set([str(x.name()) for x in 
synset_1.lemmas()]) \n\t wset_2 = set([str(x.name()) for x in synset_2.lemmas()])\n\t if len(wset_1.intersection(wset_2)) > 0:\n\t # if synset_1 != synset_2 but there is word overlap, return 1.0\n\t l_dist = 1.0\n\t else:\n\t # just compute the shortest path between the two\n\t l_dist = synset_1.shortest_path_distance(synset_2)\n\t if l_dist is None:\n\t l_dist = 0.0\n\t # normalize path length to the range [0,1]\n\t return math.exp(-self.ALPHA * l_dist)", "def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))", "def get_interaction_length(self):\n return self.radius + 2.0 #in um", "def get_diameter(self, t):\r\n if not t:\r\n return 0\r\n left_diam = self.get_diameter(t.left)\r\n right_diam = self.get_diameter(t.right)\r\n left_ht = self.get_height(t.left)\r\n right_ht = self.get_height(t.right)\r\n return max(max(left_diam , right_diam) , left_ht+right_ht+1)", "def distance_to(self, other):\n x0,y0 = self.x, self.y\n x1,y1 = other.x, other.y\n dist = math.sqrt((x1-x0) ** 2 + (y1-y0) ** 2)\n return int(dist)", "def __len__(self):\n return len(self.atom_rings)", "def angle_between_two(self, other):\n # angle = math.atan2(other.position.y - self.position.y,\n # other.position.x - self.position.x)\n minus = other.position - self.position\n angle = math.atan2(minus.y, minus.x)\n return angle", "def angle(first, other=FreeCAD.Vector(1,0,0)):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return math.acos(dotproduct(normalized(first),normalized(other)))", "def intersection_size(comm_1, comm_2):\n if comm_1 == MPI.COMM_NULL or comm_2 == MPI.COMM_NULL:\n return None\n group_1 = comm_1.Get_group()\n group_2 = comm_2.Get_group()\n inter_group = MPI.Group.Intersect(group_1, group_2)\n return inter_group.Get_size()", "def undirected_diameter(self) -> int:\n return nx.diameter(self.to_undirected())", "def CalculateCompassDifference(a, b):\n delta = NormalizeAngle(a - b)\n return delta", "def total_edge_angle(e1, e2):\n e1_source = section.index(e1[0])\n e2_target = section.index(e2[1])\n\n \"\"\" Given a pair of vertices, call angle_delta between them. \"\"\"\n f = lambda pair: utils.angle_delta(self.node_heading[pair[0]], self.node_heading[pair[1]])\n\n \"\"\" Map f onto each pair of adjacent vertices, and return the abs of the summed result. 
\"\"\"\n return abs(sum(map(f, zip(section[e1_source + 1:e2_target], section[e1_source + 2:e2_target + 1]))))", "def getSize(self):\n if self.subsym == None:\n if self.size == 0:\n return 1\n else:\n return self.size\n else:\n if self.size == 0:\n return self.subsym.getSize()\n else:\n return self.size * self.subsym.getSize()", "def get_bend_port_distances(bend: Component) -> Tuple[float64, float64]:\n p0, p1 = bend.ports.values()\n return abs(p0.x - p1.x), abs(p0.y - p1.y)", "def get_num_ring_atoms(input_mol):\n num_ring_atoms = 0\n split_index = 0\n split_indices = []\n for atom in input_mol.GetAtoms():\n if atom.IsInRing():\n num_ring_atoms += 1\n else:\n split_indices.append(split_index)\n split_index += 1\n return num_ring_atoms, split_indices", "def get_distance(self, other):\n return math.sqrt((self.x - other[0])**2 + (self.y - other[1])**2)", "def calculate_distance_edge(self):\n if self.mu > 0:\n # right interface is intersected next\n dx = self.cell_xr - self.x\n self.next_cell_index = self.cell_index + 1\n else:\n # left interface is intersected next\n dx = self.cell_xl - self.x\n self.next_cell_index = self.cell_index - 1\n\n return dx / self.mu", "def graph_dist(i1: int, g1: nx.Graph, i2: int, g2: nx.Graph) -> t.Tuple[int, int, float]:\n space1, space2 = map(dict, map(mut_space, [g1, g2]))\n d = 0\n for k in set(list(space1) + list(space2)):\n if k in space1 and k in space2:\n d += len(set(space1[k]).symmetric_difference(set(space2[k])))\n continue\n if k in space1:\n d += len(set(space1[k]))\n if k in space2:\n d += len(set(space2[k]))\n return i1, i2, d", "def edit_distance(self, other):\r\n union = len(self) + len(other)\r\n return 1.0 - 2.0*(self.intersection(other)/union)", "def hexagonal_distance(a, b) -> int:\n diffx = abs(a[0] - b[0])\n diffy = abs(a[1] - b[1])\n distance = diffx + max(0, (diffy - diffx)//2)\n return distance", "def dist(a, b):\n return math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))", "def __len__(self):\n return self.rdmol.GetNumAtoms()", "def Lengths(self):\n\n self.__do_essential_memebers_exist__()\n\n if self.element_type == \"line\":\n coords = self.points[self.elements[:,:2],:]\n lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)\n else:\n # self.GetEdges()\n # coords = self.points[self.all_edges,:]\n coords = self.points[self.elements[:,:2],:]\n lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)\n\n return lengths", "def ab_path_length(trajectory, a, b):\n a_idx = find_closest_index(trajectory, a)\n b_idx = find_closest_index(trajectory, b)\n path = trajectory[a_idx:b_idx]\n step_dists = np.linalg.norm(np.diff(path, axis=0, prepend = np.array([[0,0]])), axis = 1)\n path_length = np.sum(step_dists)\n\n return(path_length)", "def angle_between_edges_2d(endpoint1, common_point, endpoint2):\r\n return geometry.gmAngleBetweenEdges(endpoint1, common_point, endpoint2)", "def lineLength(node1, node2):\n return ((node2[1] - node1[1])**2 + (node2[0] - node1[0])**2)**(1/2)", "def angle_between_vectors(a, b):\n return math.acos(dot_product(a, b) / (length(a) * length(b)))", "def size(self):\n return 1 + self.left.size + self.right.size", "def dist(a: Point, b: Point):\n return (a.x - b.x) ** 2 + (a.y - b.y) ** 2", "def get_angle(p1, p2):\n return math.atan2(p2[1] - p1[1], p2[0] - p1[0])" ]
[ "0.6803342", "0.6031538", "0.60075694", "0.58700997", "0.58499944", "0.58232963", "0.58183986", "0.57610923", "0.57447034", "0.57311267", "0.5706374", "0.5695668", "0.5695543", "0.56807685", "0.5657946", "0.5650897", "0.5618208", "0.5611929", "0.5603994", "0.55947745", "0.5582572", "0.5580545", "0.5575348", "0.5571959", "0.5559001", "0.55453545", "0.5536014", "0.55347854", "0.5523845", "0.5509972", "0.5507938", "0.5496921", "0.54941297", "0.5488831", "0.54714286", "0.5460104", "0.5457906", "0.54556334", "0.5438606", "0.54368234", "0.54327697", "0.5415432", "0.5412411", "0.54057854", "0.5405518", "0.54050255", "0.5396837", "0.539545", "0.5382891", "0.5377028", "0.53734195", "0.5367992", "0.5366606", "0.53553385", "0.534997", "0.5347922", "0.53412527", "0.53367114", "0.5332971", "0.53271097", "0.53229904", "0.531306", "0.5309284", "0.5306809", "0.5300458", "0.5299263", "0.52969307", "0.5292873", "0.5288644", "0.5287785", "0.52839154", "0.5282229", "0.5275651", "0.5273396", "0.5272025", "0.52676666", "0.5260783", "0.526068", "0.52592355", "0.5257097", "0.5252517", "0.5251225", "0.52447855", "0.5244196", "0.52431875", "0.5238513", "0.5233894", "0.52326906", "0.52287686", "0.52230895", "0.52213866", "0.52185535", "0.5214095", "0.52137387", "0.52112997", "0.5210272", "0.5205591", "0.5202817", "0.5202459", "0.5197839", "0.5195958" ]
0.0
-1
Detects whether the structure has a pair of atoms closer to each other than the threshold. This can detect structures which may need a template so that they can be handled by RDKit correctly.
def has_degenerated_atom_positions(self, threshold):
    for i in range(0, self.conformer.GetNumAtoms()):
        center = self.conformer.GetAtomPosition(i)
        point = [center.x, center.y, center.z]
        surrounding = self.kd_tree.query_ball_point(point, threshold)

        # The query returns the atom itself as well, so more than one hit
        # means another atom lies within the threshold distance.
        if len(surrounding) > 1:
            return True

    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_closest_threshold(x: float, y: float, rw_info: pd.DataFrame) -> Tuple[int, int, int]:\n\n min_dist = CONTROL_ZONE_RADIUS\n min_ind = (0, 0, 0)\n\n for ind in rw_info.index:\n\n hp_x = rw_info[0][ind]\n hp_y = rw_info[1][ind]\n\n dist1 = np.sqrt((x - hp_x) ** 2 + (y - hp_y) ** 2)\n\n hp_x = rw_info[2][ind]\n hp_y = rw_info[3][ind]\n\n dist2 = np.sqrt((x - hp_x) ** 2 + (y - hp_y) ** 2)\n\n if dist1 < min_dist:\n min_dist = dist1\n min_ind = (ind, 0, 1)\n elif dist2 < min_dist:\n min_dist = dist2\n min_ind = (ind, 3, 4)\n\n return min_ind", "def isWithinGT(self, a, b):\n return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**0.5 < self.thresh", "def is_bound(pos1, el1, pos2, el2):\n threshold = 0.1\n if el1 == 'H' or el2 == 'H':\n threshold = 0.2\n if np.linalg.norm(np.array(pos1) - np.array(pos2)) < covalence_radius[el1] + covalence_radius[el2] + threshold:\n return True\n return False", "def check_location_confidence(self):\n\t\t## not the best way of doing things, but since the number of targets is fairly small its not a big deal\n\t\tepsilon_pixels = .05 * self.horizontal_resolution #arbitrary confidence factor\n\t\tepsilon_meters = .08\n\t\tpixel_distances = []\n\t\tactual_distances = []\n\t\tnum_observed = 0\n\t\tfor ti in self.targs:\n\t\t\tif ti.props_are_set:\n\t\t\t\tfor tj in self.targs:\n\t\t\t\t\tif tj.props_are_set: \n\t\t\t\t\t\tpixel_dist = np.linalg.norm(tj.position_camera - ti.position_camera)\n\t\t\t\t\t\tactual_dist = np.abs(tj.d_cam_image - ti.d_cam_image)\n\t\t\t\t\t\tif pixel_dist == 0:\n\t\t\t\t\t\t\tpixel_dist = 10000 #ignore two of the same points\n\t\t\t\t\t\t\tactual_dist = 10000\n\t\t\t\t\t\tpixel_distances.append(pixel_dist)\t\n\t\t\t\t\t\tactual_distances.append(actual_dist)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\t\tactual_distances.append(10000)\n\t\t\telse:\n\t\t\t\tfor _ in self.targs:\n\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\tactual_distances.append(10000)\n\t\tmin_ind_pixel = np.argmin(pixel_distances)\n\t\tmin_ind_actual = np.argmin(actual_distances)\n\t\t#min_ind is encoded in base (num_targets); decode it to find the closest two points\n\t\tbest_guys = [self.targs[min_ind_pixel/len(self.targs)],self.targs[min_ind_pixel%len(self.targs)]]\n\t\tif pixel_distances[min_ind_pixel] > epsilon_pixels or actual_distances[min_ind_actual] > epsilon_meters:\n\t\t\t#measurements are not trustworthy, return nothing\n\t\t\treturn None\n\n\t\treturn best_guys", "def all_gt(self, other):\n return self.x > other.x and self.y > other.y", "def __gt__(self,other):\n self_bounds = self.Bounds\n ndim = self.InferSpatialDimension()\n\n if isinstance(other,Mesh):\n other_bounds = other.Bounds\n mins = (self_bounds[0,:] < other_bounds[0,:]).all()\n maxs = (self_bounds[1,:] > other_bounds[1,:]).all()\n return mins and maxs\n elif isinstance(other,np.ndarray):\n # Otherwise check if an element is within a given bounds\n assert other.shape == (2,ndim)\n mins = (self_bounds[0,:] < other[0,:]).all()\n maxs = (self_bounds[1,:] > other[1,:]).all()\n return mins and maxs\n else:\n raise ValueError(\"Cannot compare mesh with {}\".format(type(other)))", "def is_close_to_collision_simple(self, depth_map=None, *, return_min_dist=False, thres=D2C_THRESHOLD):\n\t\tif depth_map is None:\n\t\t\tdepth_map = self.depth_map()\n\n\t\tmin_dist = depth_map[self._window()].min()\n\t\tis_close = min_dist < D2C_THRESHOLD\n\n\t\tif return_min_dist:\n\t\t\treturn is_close, min_dist\n\t\telse:\n\t\t\treturn is_close", "def below(self,object):\n if( 
isinstance(object,Feature) ):\n return( self.minY() > object.maxY() )\n elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):\n return( self.minY() > object[1] )\n elif( isinstance(object,float) or isinstance(object,int) ):\n return( self.minY() > object )\n else:\n logger.warning(\"SimpleCV did not recognize the input type to feature.below(). This method only takes another feature, an (x,y) tuple, or a ndarray type.\")\n return None", "def __gt__(self, other):\n return self.x ** 2 + self.y ** 2 > other.x ** 2 + other.y ** 2", "def test_gt_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1.1, 'Q2.8')\n assert b > a", "def test_boundary_boxes(gt_detection_combo):\n found = False\n overlap_threshold = 0.7\n\n for found_box in gt_detection_combo.detected_boxes:\n if overlap_between(gt_detection_combo.gt_box, found_box) > overlap_threshold:\n found = True\n break\n\n assert found is True", "def above(self,object):\n if( isinstance(object,Feature) ):\n return( self.maxY() < object.minY() )\n elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):\n return( self.maxY() < object[1] )\n elif( isinstance(object,float) or isinstance(object,int) ):\n return( self.maxY() < object )\n else:\n logger.warning(\"SimpleCV did not recognize the input type to feature.above(). This method only takes another feature, an (x,y) tuple, or a ndarray type.\")\n return None", "def is_close(x, y, thresh=1e-8):\n\n diff = x - y\n return diff > (-thresh) and diff < thresh", "def compare_junctions(\n r1: BioReaders.GMAPSAMRecord,\n r2: BioReaders.GMAPSAMRecord,\n internal_fuzzy_max_dist: int = 0,\n max_5_diff: int = 999999,\n max_3_diff: int = 999999,\n) -> str:\n # assert internal_fuzzy_max_dist >= 0\n # assert max_5_diff >= 0\n # assert max_3_diff >= 0\n strand = r1.strand\n found_overlap = False\n # super/partial --- i > 0, j = 0\n # exact/partial --- i = 0, j = 0\n # subset/partial --- i = 0, j > 0\n for i, x in enumerate(r1.segments):\n # find the first matching r2, which could be further downstream\n for j, y in enumerate(r2.segments):\n if i > 0 and j > 0:\n break\n if overlaps(x, y) > 0:\n found_overlap = True\n break\n if found_overlap:\n break\n if not found_overlap:\n return \"nomatch\"\n # now we have r1[i] matched to r2[j]\n # check that r1[i] and r2[j] match within 5'/3' max diff\n if strand == \"+\":\n if (\n abs(r1.segments[i].start - r2.segments[j].start) > max_5_diff\n or abs(r1.segments[i].end - r2.segments[j].end) > max_3_diff\n ):\n return \"partial\"\n else:\n if (\n abs(r1.segments[i].start - r2.segments[j].start) > max_3_diff\n or abs(r1.segments[i].end - r2.segments[j].end) > max_5_diff\n ):\n return \"partial\"\n\n # if just one exon, then must have less than 5'/3' diff\n # pdb.set_trace()\n\n if len(r1.segments) == 1:\n if len(r2.segments) == 1:\n # if both are single-exon, check that they have less than 5'/3' diff\n if (\n abs(r1.segments[0].start - r2.segments[0].start) <= max_5_diff\n and abs(r1.segments[0].end - r2.segments[0].end) <= max_3_diff\n ):\n return \"exact\"\n else:\n return \"partial\"\n else:\n # case: r1 single exon, r2 multi-exon\n # check that the matching exon is within difference\n # and that the r1 exon does NOT overlap with previous or later exons in r2\n if (\n abs(r1.segments[0].end - r2.segments[j].end) <= max_3_diff\n and abs(r1.segments[0].start - r2.segments[j].start) <= max_5_diff\n and (\n (j == 0 and r1.segments[0].end < r2.segments[1].start)\n or (\n j >= 1\n and r1.segments[0].start > r2.segments[j - 1].end\n and (\n j == 
len(r2.segments) - 1\n or r1.segments[0].end < r2.segments[j + 1].start\n )\n )\n )\n ):\n return \"subset\"\n else:\n return \"partial\"\n else:\n if len(r2.segments) == 1:\n # case: r1 multi exon, r2 single exon\n # r1.segments[i] matches r2.segments[0]\n # need to check that the r2, being single exon, did not overlap with previous or later r1 exons\n # and also the matching r1, r2 exon does not have huge 5'/3' difference\n if (\n (i == 0 or r1.segments[i - 1].end < r2.segments[0].start)\n and (\n i == len(r1.segments) - 1\n or r1.segments[i].start > r2.segments[0].end\n )\n and (abs(r1.segments[i].start - r2.segments[0].start) <= max_5_diff)\n and (abs(r1.segments[i].end - r2.segments[0].end) <= max_3_diff)\n ):\n return \"super\"\n else:\n return \"partial\"\n else: # both r1 and r2 are multi-exon, check that all remaining junctions agree\n k = 0\n while i + k + 1 < len(r1.segments) and j + k + 1 < len(r2.segments):\n if (\n abs(r1.segments[i + k].end - r2.segments[j + k].end)\n > internal_fuzzy_max_dist\n or abs(r1.segments[i + k + 1].start - r2.segments[j + k + 1].start)\n > internal_fuzzy_max_dist\n ):\n return \"partial\"\n k += 1\n # print i, j, k\n # check that the last matching exon, the ends are with max 5'/3' diff\n if strand == \"+\":\n if abs(r1.segments[i + k].end - r2.segments[j + k].end) > max_3_diff:\n return \"partial\"\n else:\n if abs(r1.segments[i + k].end - r2.segments[j + k].end) > max_5_diff:\n return \"partial\"\n\n if i + k + 1 == len(r1.segments):\n if j + k + 1 == len(r2.segments):\n if i == 0:\n if j == 0:\n return \"exact\"\n else:\n return \"subset\" # j > 0\n else:\n return \"super\"\n else: # r1 is at end, r2 not at end\n if i == 0:\n return \"subset\"\n else: # i > 0\n if (\n abs(r1.segments[i + k - 1].end - r2.segments[j + k - 1].end)\n > internal_fuzzy_max_dist\n or abs(r1.segments[i + k].start - r2.segments[j + k].start)\n > internal_fuzzy_max_dist\n ):\n return \"partial\"\n else:\n return \"concordant\"\n else: # r1 not at end, r2 must be at end\n if j == 0:\n return \"super\"\n else:\n if (\n abs(r1.segments[i + k - 1].end - r2.segments[j + k - 1].end)\n > internal_fuzzy_max_dist\n or abs(r1.segments[i + k].start - r2.segments[j + k].start)\n > internal_fuzzy_max_dist\n ):\n return \"partial\"\n else:\n return \"concordant\"", "def links_with(self, other, tollerance = 0.05):\n return (\n self.start.distance_to(other.start) < tollerance or\n self.start.distance_to(other.end) < tollerance or\n self.end.distance_to(other.end) < tollerance or\n self.end.distance_to(other.start) < tollerance\n )", "def __gt__(self, other):\n return self.weight() > other.weight()", "def dominate(aver_size1, aver_size2, likelibest1, likelibest2):\n return (aver_size1 < aver_size2 and likelibest1 >= likelibest2) or (\n aver_size1 <= aver_size2 and likelibest1 > likelibest2\n )", "def findtailthreshold(v, figpath=None):\n\n # internal constants\n numreps = 3 # number of restarts for the GMM\n maxsz = 1000000 # maximum number of values to consider\n nprecision = 500\n # linearly spaced values between lower and upper robust range\n\n # inputs\n if figpath is None:\n wantfig = 0\n else:\n wantfig = 1\n\n # quick massaging of input\n v2 = v[np.isfinite(v)]\n if len(v2) > maxsz:\n print('warning: too big, so taking a subset')\n v2, _, _ = picksubset(v2, maxsz)\n\n # fit mixture of two gaussians\n gmfit = gmdist(n_components=2, n_init=numreps).fit(v2.reshape(-1, 1))\n\n # figure out a nice range\n rng = robustrange(v2.flatten())[0]\n\n # include the smaller of the two 
distribution means if necessary\n rng[0] = np.min([rng[0], np.min(gmfit.means_.flatten())])\n\n # include the bigger of the two distribution means if necessary\n rng[1] = np.max([rng[1], np.max(gmfit.means_.flatten())])\n\n # OLD\n # rng = robustrange(v2.flatten())[0]\n\n # evaluate posterior\n allvals = np.linspace(rng[0], rng[1], num=nprecision)\n checkit = gmfit.predict_proba(allvals.reshape(-1, 1))\n\n # figure out crossing\n if checkit[-1,0] > 0.5:\n whdist = 0 # if the first distribution is the higher one on the right\n else:\n whdist = 1 # if the second distribution is the higher one on the right\n\n for ix in range(checkit.shape[0] - 1, -1, -1):\n if checkit[ix, whdist] <= 0.5:\n break\n\n # warn if necessary\n if checkit[ix, whdist] > 0.5:\n print('warning: no crossing of 0.5 found. results may be inaccurate!')\n\n # OLD\n # np.testing.assert_equal(\n # np.any(checkit[:, 0] > .5) and np.any(checkit[:, 0] < .5),\n # True,\n # err_msg='no crossing of 0.5 detected')\n # ix = np.argmin(np.abs(checkit[:, 0]-.5))\n\n # return it\n f = allvals[ix]\n\n # prepare other outputs\n mns = gmfit.means_.flatten()\n sds = np.sqrt(gmfit.covariances_.flatten())\n if mns[1] < mns[0]:\n mns = mns[[1, 0]]\n sds = sds[[1, 0]]\n\n # start the figure\n if wantfig:\n # make figure\n plt.plot(allvals, checkit)\n plt.plot([allvals[ix], allvals[ix]], plt.ylim(), 'k-', linewidth=2)\n plt.title('Posterior Probabilities')\n plt.savefig(figpath)\n plt.close('all')\n\n return f, mns, sds, gmfit", "def __gt__(self, other):\n return self.abs2phy.__gt__(other)", "def __gt__(self, other):\n return self.abs2phy.__gt__(other)", "def compare_thresholded_data_with_models(self):\n pass", "def threshold_closest_points(P1, P2, threshold):\r\n\r\n discard_1 = []\r\n discard_2 = []\r\n\r\n for index in range(0,len(P1)):\r\n if euclidean_distance_2(P1[index],P2[index]) <= threshold:\r\n discard_2.append(P2[index])\r\n discard_1.append(P1[index])\r\n\r\n return discard_2, discard_1", "def check_alignment(image, r1, r2):\n \n distance = dist_between_spheres(r1, r2, image.shape[0] / 2. + 10, image.shape[0] / 2.)\n gap_signal = []\n denoised = median_filter(image.copy(), 3)\n \n for j in np.arange(0., image.shape[1]): \n # Take the region around the gap, which later on will be used\n # to define the intensity at the gap between the spheres.\n # The width of the gap is not exact\n if image.shape[1] / 2. + distance + 5 > j > image.shape[1] / 2. - distance - 5:\n gap_signal.append(denoised[image.shape[0] / 2. + 10, j])\n \n centre = np.mean(np.argwhere(np.min(gap_signal) == gap_signal))\n print centre\n print len(gap_signal) / 2.\n print\n \n if abs(centre - len(gap_signal) / 2.) 
<= 1.5:\n return True\n else:\n return False", "def are_close(coord1, coord2, tolerance=10):\n return vincenty(coord1, coord2).meters < tolerance", "def arecloseenough(x1, x2):\n\n if abs(x1 - x2) <= VERYSMALL:\n return True\n \n return False", "def test__compute_tolerance_distance():\n classifier = classifier_module.Classifier(None)\n L1 = [11.2, 41.43, 1.33]\n L2 = [10.9, 41.45, 1.34]\n L3 = [12.0, 41.4412, 1.001]\n L4 = [11.3, 41.15, 1.12]\n L5 = [11.223, 41.0, 1.31]\n AL = [L1, L2, L3, L4, L5]\n symbol = \"a\"\n classifier._compute_tolerance_distance(AL, symbol)\n tolerance_distance_path = \\\n classifier_module.Classifier._get_file_path( \\\n classifier.files[classifier_module.DISTANCE_TOLERANCE_FILE], symbol)\n file_with_tolerance_distance = \\\n open(tolerance_distance_path, 'r')\n tolerance_distance = float(file_with_tolerance_distance.readline())\n file_with_tolerance_distance.close()\n assert fabs(tolerance_distance - 0.5506099238118276) < epsilon", "def __gt__(self, other):\n return self.weight > other.weight", "def match(self, enc, threshold=None, optimize=False):\n\n\t\tif len(self.encodings) == 0:\n\t\t\t# no encodings yet\n\t\t\treturn -1, 1.0\n\n\t\t# compare enc to known-face-encodings to get all euclidean distances.\n\t\tdistances = np.linalg.norm(self.encodings - enc, axis=1)\n\n\t\t# get the minimum distance\t\t\n\t\tface_index = np.argmin(distances)\n\t\tmin_distance = distances[face_index]\n\n\t\t# optimization if min_distance >= threshold\n\t\tif threshold and min_distance >= threshold:\n\t\t\tif not optimize:\n\t\t\t\treturn -1, min_distance\n\n\t\t\tprint('*** distance > threshold ({} > {})'.format(min_distance, threshold))\n\t\t\ttop_two = np.argsort(distances)[:2]\n\t\t\tidx1 = top_two[0]\n\t\t\tname1 = self.get_name(idx1)\n\t\t\tprint('\\ttop 1: {} - {:.5f}'.format(name1, distances[idx1]))\n\t\t\tidx2 = top_two[1]\n\t\t\tname2 = self.get_name(idx2)\n\t\t\tprint('\\ttop 2: {} - {:.5f}'.format(name2, distances[idx2]))\n\t\t\t\n\t\t\td1 = distances[idx1]\n\t\t\td2 = distances[idx2]\n\n\t\t\t# discard if names differ\n\t\t\tif name1 != name2:\n\t\t\t\tif abs(d1 - d2) < 0.06:\n\t\t\t\t\treturn -1, min_distance\n\t\t\telse: # name1 == name2\n\t\t\t\t# discard if same name but distance differ (2 after point)\n\t\t\t\tif int(d1 * 100) != int(d2 * 100):\n\t\t\t\t\treturn -1, min_distance\n\t\t\t\n\t\treturn face_index, min_distance", "def is_close_to_collision(self, od_bbox=None, min_dists=None, *, return_min_dist=False, thres=D2C_THRESHOLD):\n\t\tif od_bbox is None or min_dists is None:\n\t\t\tgen = self.loop(od_bbox=True, dist_to_col=True)\n\t\t\tod_bbox, dist_to_col = next(gen)\n\n\t\tmin_dists = min_dists[self._boxes_in_window(od_bbox)]\n\n\t\tmin_dist = min_dists[np.isfinite(min_dists)].min()\n\t\tis_close = min_dist < D2C_THRESHOLD\n\n\t\tif return_min_dist:\n\t\t\treturn is_close, min_dist\n\t\telse:\n\t\t\treturn is_close", "def merge(pair_list, threshold=need_to_fill):\n pair_list = list(pair_list)\n slope_list = []\n # Calculate slope of the starting coordinate of\n # every other box to the first box in the tuple.\n for pair in pair_list:\n # Get the first boxe's coordinate.\n # Due to the former process, the first box will be\n # the one on the left side.\n if len(pair) == 1:\n continue\n x0, y0, w0, h0 = pair[0]\n\n tmp_list = []\n for i in range(1, len(pair)):\n xi, yi, wi, hi = pair[i]\n # Take copy to exact up or below place into consideration.\n if xi == x0:\n if yi > y0:\n slope = float(\"inf\")\n else:\n slope = -float(\"inf\")\n else:\n slope = (yi - 
y0) * 1.0 / (xi - x0)\n # tmp list will look like\n # [slope1, slope2, ...]\n tmp_list.append(slope)\n\n # Slope list will look like\n # [[slope1, slope2...], [slope1, slope2...], ...]\n slope_list.append(tmp_list)\n\n # Then we will need to find pairs with same slope.\n # And cluster the boxes.\n # Here we take slope list as a vector and calculate their distance,\n # then use a threshold to get similar ones.\n results = []\n\n while len(slope_list) != 0:\n # Save tuples that should be merged.\n merge_boxes = []\n\n # np array will make euclide distance calculation more convienent.\n vector_r = np.array(slope_list[0])\n merge_boxes.append(pair_list[0])\n\n # Always keep pair list and slopelist corresponding.\n slope_list = slope_list[1:]\n pair_list = pair_list[1:]\n\n\n for vector, pair in zip(slope_list, pair_list):\n\n # While cauculating euclide diatance, we should take infinity\n # slope into consideration as numpy can not deal with such cases.\n vector_n = np.array(vector)\n\n inf_exist = False\n for slope in vector:\n if slope == float(\"inf\") or slope == -float(\"inf\"):\n inf_exist = True\n for slope in vector_r:\n if slope == float(\"inf\") or slope == -float(\"inf\"):\n inf_exist = True\n\n if inf_exist:\n # Calcuate distance with some pre procss.\n distance = distance_with_inf(vector_n, vector_r)\n else:\n # Calculate distance directly with numpy function.\n distance = np.linalg.norm(vector_n - vector_r)\n\n\n if distance <= threshold:\n merge_boxes.append(pair)\n slope_list.remove(vector)\n pair_list.remove(pair)\n\n\n # Then we process merge_boxes, merge them together and append it\n # to result.\n length = len(merge_boxes[0])\n merge_boxes = np.array(merge_boxes)\n tmp = []\n\n for i in range(0, length):\n tmp.append(get_bound(merge_boxes[:, i]))\n results.append(tmp)\n\n return results", "def test_high_delta_similar_to_threshold_dist(self, sampling_class,\n sampling_method_and_threshold):\n sampling_method, threshold = sampling_method_and_threshold\n # The range we allow around 0.5n\n distance_from_half = 0.01\n # The number of elements we use (computed using Chernoff bounds)\n n = int((6.0 / (distance_from_half**2)) *\n math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)\n s = sampling_class(\n threshold=threshold,\n eps=0.1,\n delta=1.0,\n sampling_method=sampling_method)\n for i in range(n):\n s.process(i, 1)\n self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)\n self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)", "def calc_tolerance(wt):\n return 1 - wt", "def threshold_iterator(threshold):\n global idf_dict\n dom_doc = xml_util.get_dom_from_xml(\"data/RTE2_dev.preprocessed.xml\")\n pairs = xml_util.get_pairs(dom_doc)\n pair_attributes = xml_util.get_attributes_from_preprocessed_pair_nodes(pairs)\n idf_dict = calculate_idf_dictionary(pair_attributes)\n print len(idf_dict.keys())\n tree_value_pairs = []\n \n # Extracting the actual lemma values from the pair nodes\n for i in range(len(pair_attributes)):\n t,h,id_num,e,ta = pair_attributes[i]\n id_num = int(id_num)\n t_values = xml_util.get_minipar_values_from_text_node(t)\n h_values = xml_util.get_minipar_values_from_text_node(h)\n tree_value_pairs.append((t_values,h_values))\n \n # Calculating distances between text and hypothesis\n distances = []\n for i in range(len(tree_value_pairs)):\n t_tree,h_tree = build_tree(tree_value_pairs[i])\n dist = tree_edit_dist.distance(t_tree, h_tree, idf_cost)\n normalizer = tree_edit_dist.distance(tree_edit_dist.Node(\"root\"), h_tree, 
idf_cost)\n normalized_dist = float(dist) / float(normalizer)\n distances.append(normalized_dist)\n \n #for d in distances:\n # print d\n \n if threshold == -1:\n for i in range(200):\n threshold = 1.0 - (0.005 * i)\n syntax_matching(pair_attributes, distances, threshold)\n else:\n syntax_matching(pair_attributes, distances, threshold)", "def compare(a, b, *, tol=1e-6):\n if abs(a - b) < tol:\n return 0.0\n elif a > b:\n return 1.0\n else:\n return -1.0", "def space_between(first_value, second_value, bboxes):\n bbox1 = bboxes[second_value][0]\n bbox2 = bboxes[first_value][2]\n space_betw = bbox1 - bbox2\n \n if (space_betw > 6):\n return True\n return False", "def get_closest_intersection(wire1, wire2):\n pass", "def __gt__(self, other: Compound[Scalar]) -> bool:\n return (self._points_set > other._points_set\n if isinstance(other, Multipoint)\n else NotImplemented)", "def _find_best_threshold(self, num_of_steps=20, verbose=False):\n xmin = self.x.min()\n xmax = self.x.max()\n step = (xmax - xmin)/num_of_steps\n \n lower_th = None\n lower_IR = 1\n\n # for each potential threshold\n for threshold in np.arange(xmin+step, xmax, step):\n IR = self._compute_isometric_ratio(threshold)\n \n if IR < lower_IR:\n lower_IR = IR\n lower_th = threshold\n \n self.threshold = lower_th\n if verbose:\n print(f'\\tThreshold:\\t\\t{lower_th}\\n\\tIsometric Ratio:\\t{lower_IR}')", "def test_weighted_trees_satisyfing_cutoff(self):\n sct = LogLikelihoodScoredTreeCollection(self.scored_trees)\n cts = sct.getWeightedTrees(cutoff=0.8)\n expected_trees = [Tree(t) for t in \"((a,b),(c,d));\", \"((a,b),(c,d));\",\n \"((a,b),c,d);\"]\n for i in range(len(cts)):\n cts[i][1].sameTopology(expected_trees[i])\n \n ct = cts.getConsensusTree()\n self.assertTrue(ct.sameTopology(Tree(\"((a,b),(c,d));\")))", "def syntax_matching(pair_attributes, distances, threshold):\n n = len(pair_attributes)\n entailments = [0 for foo in range(n+1)]\n results = [0 for foo in range(n+1)]\n # Calculates entailments and accuracy\n for i in range(n):\n t,h,id_num,e,ta = pair_attributes[i]\n id_num = int(id_num)\n entails = distances[i] < threshold\n entailments[id_num] = \"YES\" if entails else \"NO\"\n results[id_num] = 1 if entailments[id_num] == e else 0\n lexical.output_rte(entailments)\n print \"Threshold: \" + \"%.3f\"%threshold + \" Accuracy: \" + str(float(sum(results)) / float(n))", "def compute_bonds(structure, ops_dict):\n\n default_radii = json.loads(\n pkg_resources.resource_string(__name__, 'data/covradius.json')\n )\n default_radii.update(\n ops_dict['covalent-radii']\n )\n\n bonds = []\n for atm1, atm2 in itertools.combinations(enumerate(structure.atms), 2):\n\n idx1 = atm1[0]\n idx2 = atm2[0]\n symb1 = atm1[1].symb\n symb2 = atm2[1].symb\n coord1 = atm1[1].coord\n coord2 = atm2[1].coord\n\n threshold = default_radii[symb1] + default_radii[symb2]\n dist = linalg.norm(coord1 - coord2)\n\n if dist < threshold:\n bonds.append(\n (idx1, idx2, 1.0) if idx1 < idx2 else (idx2, idx1, 1.0)\n )\n\n return bonds", "def test_ge_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1.1, 'Q2.8')\n assert b > a", "def sanity_check(self):\n score = 0\n curvatures = self._curvature()\n if abs(curvatures[0] - curvatures[1]) / max(curvatures) > 0.15:\n # difference in curvature is more than 15%\n score -= 1\n\n diff_std = np.std(self.right_fitx - self.left_fitx)\n if diff_std > 30:\n # std of the difference between the right lane and left lane is more than 30 pixel\n score -= 1\n\n # roughly parallel\n if abs(self.left_fit[0] - self.right_fit[0]) / 
max(self.left_fit[0], self.right_fit[0]) > 0.15:\n # difference in slope is more than 15%\n score -= 1\n\n return score", "def find_distance_in_same_type(self):\n pass", "def is_better(self, a: float, best: float) -> bool:\n if self.mode == \"min\" and self.threshold_mode == \"rel\":\n rel_epsilon = 1.0 - self.threshold\n comp = best * rel_epsilon if best >= 0 else best * (1 + self.threshold)\n return a < comp\n\n elif self.mode == \"min\" and self.threshold_mode == \"abs\":\n return a < best - self.threshold\n\n elif self.mode == \"max\" and self.threshold_mode == \"rel\":\n rel_epsilon = self.threshold + 1.0\n return a > best * rel_epsilon\n\n else: # mode == 'max' and epsilon_mode == 'abs':\n return a > best + self.threshold", "def get_tolerances(self) -> Tuple[float, float]:\n return self.rtol, self.atol", "async def distance_threshold(self, *args):\n return await self._rpc.distance_threshold(*args)", "def find_metric_threshold(self):\n logger.info(\"compute metric threshold\")\n\n ### Beaucoup trop lent quand on a beaucoup de models ###\n\n df_results_not_aggregated = self.result_reader.load_all_results(aggregate=False)\n\n if len(df_results_not_aggregated) == 0:\n logger.info(\"threshold = None\")\n return None\n\n main_scorer = \"test_%s\" % self.job_config.main_scorer\n (df_results_not_aggregated[main_scorer].fillna(df_results_not_aggregated[main_scorer].min(), inplace=True))\n min_cv = df_results_not_aggregated.groupby(\"job_id\")[main_scorer].min().values\n delta_min_max_cv = np.median(\n df_results_not_aggregated.groupby(\"job_id\")[main_scorer].apply(lambda x: x.max() - x.min())\n )\n\n if len(min_cv) <= self.min_nb_of_models:\n logger.info(\"threshold = None\")\n return None\n\n min_cv = -np.sort(-min_cv)\n result = min_cv[self.min_nb_of_models] - delta_min_max_cv\n\n # result = np.percentile( min_cv, self._get_quantile(len(min_cv)) * 100)\n # TODO : ici peut etre faire une estimation parametric du quantile avec un Kernel, plus smooth et moins sensible quand peu de valeurs\n\n logger.info(\"threshold : %2.2f\" % result)\n return result", "def get_most_similar(distances):\n threshold = get_threshold(distances)\n most_similar = list()\n distances = sorted(distances, key=lambda x: x[0])\n\n for i in distances:\n if i[0] < threshold:\n most_similar.append(i)\n else:\n break\n\n return most_similar", "def get_pairwise_matches(pos1, descs1, pos2, descs2, up_to=30):\n assert pos1.shape[0] * pos2.shape[0] < 1e8, \\\n \"Too many points: increase cornerness threshold\"\n assert pos1.shape[0] > 10 and pos1.shape[0] > 10, \\\n \"Not enough points: lower cornerness threshold\"\n # get the similarities between all descriptors\n sims = np.dot(descs1, descs2.T)\n # get the best matches\n mi2 = sims.argmax(axis=1).squeeze()\n ms = sims.max(axis=1).squeeze()\n bmi1 = ms.argsort()[::-1][:up_to]\n bmi2 = mi2[bmi1]\n # return their positions\n bp1 = pos1[bmi1]\n bp2 = pos2[bmi2]\n return bp1, bp2", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def computePairCriteria(segment1, segment2, mergedSegments, updatedSpeed, inversedIndex, weights, minValidData,verbose=False):\n \n segment2 = inversedIndex.loc[segment2]\n \n if mergedSegments.loc[segment1]['tag']['highway']!=mergedSegments.loc[segment2]['tag']['highway'] :\n return np.inf\n\n if mergedSegments.loc[segment1].nonNullProp >= minValidData : \n return np.inf\n \n if segment2 == segment1 : return np.inf\n criteronScores = [\n 
profileSim(segment1,[segment2],updatedSpeed),\n directtion(segment1,[segment2],mergedSegments),\n shareNoEdges(segment1,[segment2],mergedSegments),\n lengthCriterion(segment1, segment2, mergedSegments, minValidData, inversedIndex)\n ]\n if verbose : print(criteronScores)\n return sum(criteronScores*weights)[0]", "def check_sim_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.sim_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.sim_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.sim_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.sim_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.sim_spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... no data?\")\n pass", "def _subtree_above_threshold(self, root, threshold=0):\r\n\r\n # BFS will grab the most distant leaves last\r\n # reverse should guarantee that we get the smallest subtrees\r\n nodes = root.get_terminals()\r\n nodes.reverse()\r\n\r\n # always making n1 < n2 so that we can memoize\r\n # this all could be done in parallel\r\n node_pairs = itertools.ifilter(\r\n lambda (a1, a2): a1.name < a2.name,\r\n itertools.product(nodes, nodes))\r\n\r\n for pair in node_pairs:\r\n distance = self._node_distance(pair[0], pair[1])\r\n if distance and distance < threshold:\r\n return False\r\n\r\n return True", "def is_border(self, threshold):\n\n def dist_to_line(p1, p2, p3):\n return np.linalg.norm(np.cross(p2 - p1, p1 - p3)) / np.linalg.norm(p2 - p1)\n\n total_dist = 0\n for p in self.shape:\n total_dist += dist_to_line(self.shape[0], self.shape[-1], p)\n return total_dist < threshold", "def ring1_isoutside_ring2_cmp_alt(ringlist, ring1_index, ring2_index,\n N_lines2use=opt.alt_sort_N,\n increase_N_if_zero=True, boundary=None):#####TOL\n ring1 = ringlist[ring1_index]\n ring2 = ringlist[ring2_index]\n if ring1.path == ring2.path:\n return 0\n\n dbrlist = ringlist if opt.debug_lines_used_to_sort_full else None\n debug12, debug21 = '', ''\n if opt.debug_lines_used_to_sort:\n rec_num = 0 if increase_N_if_zero else 1\n debug12 = os.path.join(opt.output_directory_debug,\n f'sorting_lines_{ring1_index}-{ring2_index}_it{rec_num}.svg')\n debug21 = os.path.join(opt.output_directory_debug,\n f'sorting_lines_{ring2_index}-{ring1_index}_it{rec_num}.svg')\n\n countHits12 = ring1_isbelow_ring2_numHits(\n ring1, ring2, N_lines2use, debug_name=debug12, ring_list=dbrlist)\n countHits21 = ring1_isbelow_ring2_numHits(\n ring2, ring1, N_lines2use, debug_name=debug21, ring_list=dbrlist)\n if countHits12 == 0 or countHits21 == 0:\n if countHits12 > 0:\n return -1\n elif countHits21 > 0:\n return 1\n elif increase_N_if_zero:\n N_upped = N_lines2use * max(len(ring1.path), len(ring2.path))\n improved_res = ring1_isoutside_ring2_cmp_alt(\n ringlist, ring1_index, ring2_index, N_lines2use=N_upped,\n increase_N_if_zero=False, boundary=boundary)\n if improved_res != 0:\n return improved_res\n elif ring1.isClosed() or ring2.isClosed():\n if opt.manually_fix_sorting:\n return 
ask_user_to_sort(\n ring1_index, ring2_index, ringlist, make_svg=True)\n else:\n raise Exception(\n \"Problem sorting rings... set \"\n \"'manually_fix_sorting=True' in options4rings.py \"\n \"to fix manually.\"\n )\n else:\n return 0\n else:\n return 0\n\n # neither of the counts were zero\n ratio21over12 = countHits21/countHits12\n try:\n upper_bound = 1.0/percentage_for_disagreement\n except ZeroDivisionError:\n from numpy import Inf\n upper_bound = Inf\n\n if percentage_for_disagreement < ratio21over12< upper_bound:\n\n debug12, debug21 = '', ''\n if opt.debug_lines_used_to_sort:\n debug12 = os.path.join(opt.output_directory_debug,\n f'sorting_lines_{ring1_index}-{ring2_index}_it2.svg')\n debug21 = os.path.join(opt.output_directory_debug,\n f'sorting_lines_{ring2_index}-{ring1_index}_it2.svg')\n\n # still not sure, so use more lines\n N_upped = N_lines2use * max(len(ring1.path), len(ring2.path))\n countHits12 = ring1_isbelow_ring2_numHits(\n ring1, ring2, N_upped, debug_name=debug12, ring_list=dbrlist)\n countHits21 = ring1_isbelow_ring2_numHits(\n ring2, ring1, N_upped, debug_name=debug21, ring_list=dbrlist)\n ratio21over12 = countHits21/countHits12\n\n if percentage_for_disagreement < ratio21over12 < upper_bound:\n # still not sure, ask user, if allowed\n if opt.manually_fix_sorting:\n return ask_user_to_sort(\n ring1_index, ring2_index, ringlist, make_svg=True)\n else:\n raise Exception(\n \"Problem sorting rings... set \"\n \"'manually_fix_sorting=True' in options4rings.py to \"\n \"fix manually.\"\n )\n if countHits12 > countHits21:\n return -1\n elif countHits12 < countHits21:\n return 1\n else:\n return 0", "def decide(el, il, model, threshold):\n\n if model == 0:\n return el >= threshold[0] and il >=threshold[1]\n elif model == 1:\n return el >= threshold[0] or il >= threshold[1]\n elif model == 2:\n return harmonic_mean([el, il]) >= harmonic_mean(threshold)\n else:\n return bool(round(random.random()))", "def isAligned(self):\n return (\n abs(self.desired_distance - self.vision.getDistance())\n <= self.DISTANCE_TOLERANCE\n ) and (abs(self.vision.getHeading()) <= self.HEADING_TOLERANCE)", "def test_lt_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1.1, 'Q2.8')\n assert a < b", "def test_compare(self): \n d1 = heat(\n np.array([[0.5, 1]]),\n np.array([[0.5, 1.1]])\n )\n d2 = heat(\n np.array([[0.5, 1]]),\n np.array([[0.5, 1.5]])\n )\n\n # These are very loose bounds\n assert d1 < d2", "def close(a,b):\n return abs(a-b) < epsilon", "def kinase_distances(structure_distances, by=\"minimum\", coverage_min=0.0):\n\n data = structure_distances\n\n # Filter by coverage\n data = data[data[\"bit_coverage\"] >= coverage_min].reset_index(drop=True)\n # Note: Kinase pair names must be sorted alphabetically to allow grouping by kinase pair names\n kinase_pair_alphabetically_sorted = (\n pd.Series(zip(data[\"kinase.1\"], data[\"kinase.2\"])).map(list).map(sorted)\n )\n data[[\"kinase.1\", \"kinase.2\"]] = kinase_pair_alphabetically_sorted.to_list()\n # Group by kinase pair names (which have been alphabetically sorted before!)\n structure_distances_grouped_by_kinases = data.groupby(by=[\"kinase.1\", \"kinase.2\"], sort=False)\n\n # Get distance values per kinase pair based on given condition\n # Note: For min/max we'd like to know which structure pairs were selected!\n by_terms = \"minimum maximum mean median size std\".split()\n\n if by == \"minimum\":\n kinase_distances = data.iloc[\n structure_distances_grouped_by_kinases[\"distance\"].idxmin()\n ].set_index([\"kinase.1\", 
\"kinase.2\"])\n elif by == \"maximum\":\n kinase_distances = data.iloc[\n structure_distances_grouped_by_kinases[\"distance\"].idxmax()\n ].set_index([\"kinase.1\", \"kinase.2\"])\n elif by == \"mean\":\n kinase_distances = structure_distances_grouped_by_kinases.mean()[[\"distance\"]]\n elif by == \"median\":\n kinase_distances = structure_distances_grouped_by_kinases.median()[[\"distance\"]]\n elif by == \"size\":\n kinase_distances = structure_distances_grouped_by_kinases.size().to_frame(\"distance\")\n elif by == \"std\":\n kinase_distances = structure_distances_grouped_by_kinases.std()[[\"distance\"]]\n kinase_distances = round(kinase_distances, 3)\n else:\n raise ValueError(f'Condition \"by\" unknown. Choose from: {\", \".join(by_terms)}')\n\n return kinase_distances", "def compare_geometrycollection(config, geometry_x, geometry_y):\n if config in BLIST:\n # print('arc distance: %s' % str(arc_distance(x, y)))\n return arc_distance(geometry_x, geometry_y) < EPOCH_CURVE_RELATIVE\n # return True\n # else:\n # print('arc distance: %s' % str(arc_distance(x, y)))\n # return False\n # else:\n\n if not config in BLIST:\n arct = wkt.loads(geometry_x)\n pgis = wkt.loads(geometry_y)\n result = arct.equals(pgis)\n return result\n\n return False", "def _need_parens(outer, inner, adjust):\n return _OP_ORDER[outer.__class__] >= _OP_ORDER[inner.__class__] + adjust", "def isGE(self, a : float, b : float) -> bool:\n return (a >= b - self.tol * max(abs(a), abs(b), 1.0)) #and (a >= b - 0.1)", "def isDominated(wvalues1, wvalues2):\n not_equal = False\n for self_wvalue, other_wvalue in zip(wvalues1, wvalues2):\n print(\"self_wvalue: \"+str(self_wvalue))\n print(\"other_wvalue: \"+str(other_wvalue))\n if self_wvalue > other_wvalue:\n return False\n elif self_wvalue < other_wvalue:\n not_equal = True\n return not_equal", "def __le__(self,other):\n self_bounds = self.Bounds\n ndim = self.InferSpatialDimension()\n\n if isinstance(other,Mesh):\n other_bounds = other.Bounds\n mins = (self_bounds[0,:] >= other_bounds[0,:]).all()\n maxs = (self_bounds[1,:] <= other_bounds[1,:]).all()\n return mins and maxs\n elif isinstance(other,np.ndarray):\n # Otherwise check if an element is within a given bounds\n assert other.shape == (2,ndim)\n mins = (self_bounds[0,:] >= other[0,:]).all()\n maxs = (self_bounds[1,:] <= other[1,:]).all()\n return mins and maxs\n else:\n raise ValueError(\"Cannot compare mesh with {}\".format(type(other)))", "def __comparing_points(self, point1, point2) -> bool:\n return (abs(point1.x - point2.x) <= self.dirt_pos_tolerance and abs(\n point1.y - point2.y) <= self.dirt_pos_tolerance)", "def compute_bayesian_threshold(points, nominal_point, confidence_level):\n distances = [np.linalg.norm(p - nominal_point, ord = 1) for p in points]\n confidence_rank = min(math.ceil(len(points) * confidence_level),len(points)-1)\n #print(confidence_level, confidence_rank)\n threshold = np.partition(distances, confidence_rank)[confidence_rank]\n return threshold", "def is_same_waypoint(self, wp1, wp2, max_d=0.5, max_v=0.5):\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n ddif = dl(wp1.pose.pose.position, wp2.pose.pose.position)\n if ddif < max_d:\n return True\n return False", "def compare_gs_struct_to_BDM_structs(gs_contcar, \r\n defect_name: str, \r\n base_path: str,\r\n stol: float = 0.2,\r\n ):\r\n sm = StructureMatcher(ltol=0.2, stol=stol)\r\n defect_structures = get_structures(defect_name, base_path )\r\n for key, structure in defect_structures.items():\r\n if gs_contcar == 
\"Not converged\" :\r\n print(\"gs_contcar not converged\")\r\n break\r\n elif structure != \"Not converged\" :\r\n try:\r\n if sm.fit(gs_contcar, structure):\r\n return True #as soon as you find the structure, return True\r\n except AttributeError:\r\n print(\"Fucking error grabbing structure\")\r\n else:\r\n print(\"{} Structure not converged\".format(key))\r\n return False # structure not found for this charge state\r", "def test_constructed_is_small(self):\n self.assertTrue(all(elt<10 for elt in goodwinsheaf.checkradii()))#check all entries have small radii", "def LLR_above_thresh(self, threshold, groundtype):\n\t\tLLR = self.log_likelihood_ratios(groundtype=groundtype)\n\t\treturn (LLR >= threshold)", "def compare_exact_with(self, exact_details, method=\"\", err_tolerance=0.001):\n exact = exact_details[self.modified_t_z_label]\n exact_2 = exact_details[self.modified_t_z__final_h_label]\n# print(\"\\n\\nExact...\")\n# print(exact)\n \n method_dict = self.get_t_z_from_step(self.h, method)\n# print(f\"{method}.....\")\n# print(method_dict)\n# print()\n \n step_tolerance = 0.00000001\n low_step = self.h\n high_step = low_step\n \n initial_h = self.h\n not_done = True\n while not_done:\n count = len(exact)\n for exact_time, exact_value in exact.items():\n if exact_time in method_dict:\n error = abs(exact_value - method_dict[exact_time])\n if error > err_tolerance:\n # Also helps to keep track of the step before and after/on\n # ascertaining\n low_step = high_step\n high_step = (1/2.0) * low_step\n break\n if (error < err_tolerance) and count == 1:\n not_done = False\n # Use bisection method to get the actual step size.\n# mid_step = (high_step + low_step) / 2.0\n mid_step = high_step + 0.05\n# print(f\"\\n\\n{method}-----------------{low_step}, {high_step}\")\n not_seen_actual_step_size = True\n## while abs(high_step - low_step) > step_tolerance:\n# while not_seen_actual_step_size:\n# is_step_tolerable = self.is_step_tolerable(mid_step,\n# exact_details, \n# method, \n# err_tolerance)\n# if is_step_tolerable:\n# high_step = mid_step\n## mid_step = (high_step + low_step) / 2.0\n# mid_step = high_step + 0.05\n# else:\n# not_seen_actual_step_size = False\n# low_step = mid_step\n## mid_step = (high_step + low_step) / 2.0\n# mid_step = high_step\n## print(f\"\\n-----------------{low_step}, {high_step}\")\n break\n count -= 1\n method_dict = self.get_t_z_from_step(high_step, method)\n \n final_h = high_step # change this later\n modified_at_original_time = {}\n \n if self.A1 and self.A2: # store the refined {time: head} for both reservoirs\n modified_A1_at_original_time = {}\n modified_A2_at_original_time = {}\n \n for _time in exact:\n if (_time in method_dict):\n modified_at_original_time[_time] = method_dict[_time]\n if self.A1 and self.A2:\n modified_A1_at_original_time[_time] = (method_dict[_time] * self.A)/self.A1\n modified_A2_at_original_time[_time] = (method_dict[_time] * self.A)/self.A2\n \n if self.A1 and self.A2:\n return {\n self.initial_h_label: initial_h, \n self.final_h_label: final_h, \n self.modified_t_z_label: modified_at_original_time, \n self.modified_t_z_A1_label: modified_A1_at_original_time,\n self.modified_t_z_A2_label: modified_A2_at_original_time\n } \n return {\n self.initial_h_label: initial_h, \n self.final_h_label: final_h, \n self.modified_t_z_label: modified_at_original_time\n }", "def within(point: tuple, box: tuple) -> bool:\r\n \r\n return box[0] < point[0] < box[2] and box[1] < point[1] < box[3]", "def get_possible_tw(self):\n ev = self.ev\n f = 
np.array([np.abs(a - b) for a in ev for b in ev if not np.isclose(a, b)])\n return f[~(np.triu(np.abs(f[:, None] - f) <= settings.EQ_COMPARE_TOL, 1)).any(0)]", "def sbound(self, u, s):\n sele = u.select_atoms(s)\n calc = u.select_atoms('name CAL')\n \n dist = MDAnalysis.analysis.distances.distance_array(calc.coordinates(), sele.coordinates())\n for i, row in enumerate(dist):\n \n if any([d<2.5 for d in row]):\n\treturn (True, i)\n return (False, -1)", "def bond_check(bond_distance,bond_min=0,bond_max=1.5): # we can define the default min and max in the def\n if bond_distance >bond_min and bond_distance<bond_max:\n return True\n else:\n return False", "def para_lower_than(threshold):\n\n return lambda step, curr_obj, curr_optimized_obj, extra_para: extra_para<threshold", "def all_close(goal, actual, tolerance):\n #all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True", "def __lt__(self,other):\n self_bounds = self.Bounds\n ndim = self.InferSpatialDimension()\n\n if isinstance(other,Mesh):\n other_bounds = other.Bounds\n mins = (self_bounds[0,:] > other_bounds[0,:]).all()\n maxs = (self_bounds[1,:] < other_bounds[1,:]).all()\n return mins and maxs\n elif isinstance(other,np.ndarray):\n # Otherwise check if an element is within a given bounds\n assert other.shape == (2,ndim)\n mins = (self_bounds[0,:] > other[0,:]).all()\n maxs = (self_bounds[1,:] < other[1,:]).all()\n return mins and maxs\n else:\n raise ValueError(\"Cannot compare mesh with {}\".format(type(other)))", "def bond_check(distance, minimum=0, maximum=1.5): # when variables are set equal to => default\n if distance > minimum and distance < maximum:\n return True\n return False", "def check_threshold(data, threshold, above=True, flexibility=.02, cushion=3):\n if above:\n across = (data > threshold) * 1\n across_secondary = (data > (threshold * (1-flexibility))) * 1\n else:\n across = (data < threshold) * 1\n across_secondary = (data < (threshold * (1+flexibility))) * 1\n\n index_backdown = [i + 1 for i, x in enumerate(np.diff(across_secondary)) if x == -1]\n step_down = np.diff(np.concatenate(([0.], np.cumsum(across)[index_backdown])))\n across[index_backdown] = -step_down\n test_across = np.cumsum(across)\n times_across = sum(test_across == cushion)\n\n return across, test_across, times_across", "def test_nearest_boundary_odd():\n assert _nearest_boundary(10, 19, 14, 0) == 0\n assert _nearest_boundary(10, 19, 14, 1) == 1", "def _topographic_error_rectangular(self, data):\n t = 1.42\n # b2mu: best 2 matching units\n b2mu_inds = argsort(self._distance_from_weights(data), axis=1)[:, :2]\n b2my_xy = unravel_index(b2mu_inds, self._weights.shape[:2])\n b2mu_x, b2mu_y = b2my_xy[0], b2my_xy[1]\n dxdy = hstack([diff(b2mu_x), diff(b2mu_y)])\n distance = norm(dxdy, axis=1)\n return (distance > t).mean()", "def bond_checker(atom, dict, bond_dict):\n bound = []\n for item, values in dict.items():\n bond_range = check_bond_len(bond_dict, atom[0], values[\"element\"]) + 0.2\n if distance_checker(atom[1:], values[\"coor\"]) <= bond_range:\n bound.append(item)\n return bound", "def _check_large_tilt(self):\n large_tilt = []\n xy, xz, yz = self.tilt_factors\n x,y,_ = self.cell_lengths\n\n large_tilt.append(-x/2<xy<x/2)\n 
large_tilt.append(-x/2<xz<y/2)\n large_tilt.append(-x/2<yz<y/2)\n return not all(large_tilt)", "def compare_geometry(config, geometry_x, geometry_y):\n if geometry_x.upper().endswith('EMPTY') and geometry_y.upper().endswith(\n 'EMPTY'):\n return True\n\n if config in BLIST:\n return arc_distance(geometry_x, geometry_y) < EPOCH_CURVE_RELATIVE\n # return True\n # else:\n # print('arc distance: %s' %\n # str(arc_distance(geometry_x, geometry_y)))\n # return False\n\n if not config in BLIST:\n arct = wkt.loads(geometry_x)\n pgis = wkt.loads(geometry_y)\n result = arct.equals_exact(pgis, EPOCH)\n return result\n\n return False", "def _compute_connection(current_waypoint, next_waypoint, threshold=35):\n n = next_waypoint.transform.rotation.yaw\n n = n % 360.0\n\n c = current_waypoint.transform.rotation.yaw\n c = c % 360.0\n\n diff_angle = (n - c) % 180.0\n if diff_angle < threshold or diff_angle > (180 - threshold):\n return RoadOption.STRAIGHT\n elif diff_angle > 90.0:\n return RoadOption.LEFT\n else:\n return RoadOption.RIGHT", "def within_tolerance(x, y, tolerance): \r\n return abs(x) <= tolerance and abs(y) <= tolerance", "def _find_thresh(im_1, im_2, a, b, thresh_r=0.0):\n if im_1.dtype not in [np.uint16, np.uint8]:\n incr = (im_1.max() - im_1.min()) / 256.0\n else:\n incr = 1\n\n thresh_max = im_1.max()\n thresh_min = im_1.min()\n thresh = thresh_max\n r = _pearsonr_below_thresh(thresh, im_1, im_2, a, b)\n min_r = r\n min_thresh = thresh\n while thresh > thresh_min and r > thresh_r:\n thresh -= incr\n r = _pearsonr_below_thresh(thresh, im_1, im_2, a, b)\n if min_r > r:\n min_r = r\n min_thresh = thresh\n\n if thresh == thresh_min:\n thresh = min_thresh\n\n return thresh", "def intersect(MBR1, MBR2):\n if MBR1['xmin'] > MBR2['xmax'] or MBR1['xmax'] < MBR2['xmin'] or MBR1['ymin'] > MBR2['ymax'] or MBR1['ymax'] < MBR2[\n 'ymin']:\n return 0\n return 1", "def nearly_equal(a, b, sig_fig=5):\n return a == b or int(a*10**sig_fig) == int(b*10**sig_fig)", "def _reduce_distances(self, threshold):\n reduced = self.orig_dists.copy()\n reduced[reduced <= threshold] = 0\n # Remove ignored from all consideration\n ignrd_indices = [self.index[name] for name in self.ignored]\n if ignrd_indices:\n reduced[:,ignrd_indices] = np.inf\n reduced[ignrd_indices,:] = np.inf\n # Check if the given parameters are feasible\n chsn_indices = set(self.index[name] for name in self.chosen)\n avail_indices = set(self.index[name] for name in self.available)\n ca_indices = chsn_indices | avail_indices\n unassigned_indices = np.array(list(self._not_ignored_inds - ca_indices))\n if len(unassigned_indices) == 0:\n unassigned_orphans = unassigned_indices\n else:\n ca_indices = list(ca_indices)\n avail_in_range = np.count_nonzero(reduced[np.ix_(unassigned_indices,ca_indices)] == 0, axis=1)\n unassigned_orphans = unassigned_indices[avail_in_range == 0]\n return reduced, unassigned_orphans", "def __gt__(self, other):\n return self.element() > other.element()", "def compare(self, t2) -> bool:\n return True if self.get_edge(t2) >= 0 else False", "def __gt__(self, other):\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._gt_1d(other)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._gt_2d(other)\n else:\n return None", "def calibrate_threshold(test_graphs):\r\n best_threshold = None\r\n best_result = None\r\n for threhold in range(1, 50):\r\n cur_res = evaluate_argument_mention(test_graphs, threhold)\r\n if 
(best_result is None) or (cur_res > best_result):\r\n best_result = cur_res\r\n best_threshold = threhold\r\n return (best_threshold, best_result)", "def max_min_distance(self, T0: SE3, T1: SE3, T2: SE3) -> (float, float, str):\n tol = 10e-10\n # T_rel_01 = T0.inv().dot(T1)\n T_rel_12 = T1.inv().dot(T2)\n\n p0 = T0.as_matrix()[0:3, 3]\n z1 = T1.as_matrix()[0:3, 2]\n x1 = T1.as_matrix()[0:3, 0]\n p1 = T1.as_matrix()[0:3, 3]\n p2 = T2.as_matrix()[0:3, 3]\n\n p0_proj = p0 - (z1.dot(p0 - p1)) * z1 # p0 projected onto T1 plane\n p2_proj = p2 - (z1.dot(p2 - p1)) * z1 # p2 projected onto T1 plane\n\n if norm(p1 - p0_proj) < tol or norm(p2_proj - p1) < tol:\n d = norm(T2.trans - T0.trans)\n return d, d, False\n\n r = norm(p2_proj - p1) # radius of circle p2_proj is on\n delta_th = arctan2(cross(x1, p2_proj - p1).dot(z1), np.dot(x1, p2_proj - p1))\n\n # closest and farthest point from p0_proj\n sol_1 = r * (p0_proj - p1) / norm(p0_proj - p1) + p1\n sol_2 = -r * (p0_proj - p1) / norm(p0_proj - p1) + p1\n sol_min = min(sol_1 - p0_proj, sol_2 - p0_proj, key=norm) + p0_proj\n sol_max = max(sol_1 - p0_proj, sol_2 - p0_proj, key=norm) + p0_proj\n\n th_max = arctan2(cross(x1, sol_max - p1).dot(z1), np.dot(x1, sol_max - p1))\n th_min = arctan2(cross(x1, sol_min - p1).dot(z1), np.dot(x1, sol_min - p1))\n\n rot_min = rot_axis(th_min - delta_th, \"z\")\n d_min = norm(T1.dot(rot_min).dot(T_rel_12).trans - T0.trans)\n\n rot_max = rot_axis(th_max - delta_th, \"z\")\n d_max = norm(T1.dot(rot_max).dot(T_rel_12).trans - T0.trans)\n\n if abs(th_max - delta_th) < tol and d_max > d_min:\n return d_max, d_min, \"below\"\n elif abs(th_min - delta_th) < tol and d_max > d_min:\n return d_max, d_min, \"above\"\n else:\n return d_max, d_min, False", "def frame_in_range(frame_):\n f = cv2.cvtColor(frame_, cv2.COLOR_BGR2GRAY)\n if f.shape != grey.shape:\n f = cv2.resize(f, (grey.shape[1], grey.shape[0]))\n score = structural_similarity(f, grey)\n self.logger.debug(f\"frame score: {score}, {score>kwargs['threshold']}\")\n return score > kwargs['threshold']" ]
[ "0.5863067", "0.57756513", "0.56205285", "0.55818784", "0.54911447", "0.5490294", "0.5413528", "0.5393008", "0.53929734", "0.5383899", "0.5383543", "0.5363858", "0.533649", "0.5334817", "0.53021765", "0.5235089", "0.5234669", "0.52207446", "0.52092874", "0.52092874", "0.52015173", "0.51955855", "0.5194146", "0.51926386", "0.51905257", "0.51722556", "0.51683086", "0.5157696", "0.51570153", "0.51228076", "0.5113911", "0.5107704", "0.51043415", "0.50758296", "0.5070754", "0.5064317", "0.50520647", "0.50461364", "0.50288534", "0.5020275", "0.5004993", "0.49992675", "0.49990827", "0.4994446", "0.49931246", "0.49926496", "0.4989105", "0.49889484", "0.49882472", "0.498726", "0.4983775", "0.49780676", "0.49740267", "0.49730217", "0.4968161", "0.49668068", "0.49657232", "0.49630374", "0.49590346", "0.49575254", "0.4957118", "0.4951334", "0.49473393", "0.49443537", "0.49431545", "0.49386698", "0.49362725", "0.4933921", "0.49316996", "0.49293965", "0.49268425", "0.4926632", "0.49222556", "0.492131", "0.49209824", "0.49192455", "0.49190196", "0.4918996", "0.49122983", "0.49079335", "0.49052998", "0.490349", "0.49034184", "0.48947403", "0.48935515", "0.48929858", "0.48898536", "0.4889108", "0.48812377", "0.48647898", "0.48623762", "0.48573625", "0.48539376", "0.48535928", "0.4852655", "0.48478535", "0.48477814", "0.48403007", "0.4839132", "0.48367822" ]
0.54279095
6
Detects whether the structure has a pair of atoms in the range, meaning that the depiction could be improved.
def count_suboptimal_atom_positions(self, lowerBound, upperBound):
    counter = 0
    for i in range(self.conformer.GetNumAtoms()):
        center = self.conformer.GetAtomPosition(i)
        point = [center.x, center.y, center.z]
        surroundingLow = self.kd_tree.query_ball_point(point, lowerBound)
        surroundingHigh = self.kd_tree.query_ball_point(point, upperBound)
        if len(surroundingHigh) - len(surroundingLow) > 0:
            counter += 1
    return counter / 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isRangeValid(self) -> bool:\n ...", "def check_unibranch_validity(pair, positions, intersection):\n assert(intersection is not None)\n check = []\n for par in pair:\n b = 0\n pos = positions[par-1]\n for branch in intersection:\n b +=1\n for tup in branch:\n if not isinstance(tup, dict):\n continue\n if pos in tup:\n check.append(b)\n if check[0] == check[1]:\n return True\n return False", "def f_has_range(self):\n return len(self._explored_range) > 0", "def validBond(index1, index2, direction):\n #print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\n cell1 = index1/numAtomsPerCell\n cell2 = index2/numAtomsPerCell\n #Find the coordinates of the cell in units of interaction cells\n posInX1 = int(cell1/(size*size))\n posInX2 = int(cell1/(size*size))\n leftover1 = cell1%(size*size)\n leftover2 = cell2%(size*size)\n posInY1 = int(leftover1/size)\n posInY2 = int(leftover2/size)\n posInZ1 = leftover1%size\n posInZ2 = leftover2%size\n \n #Now, a valid interaction can cross an interaction cell boundary in any direction,\n #but it has a maximum length of one interaction cell. However, I have made the minimum\n #size of this larger translated lattice equal to 3*3*3 interaction cells. Therefore,\n #when we hit an edge and get in invalid interaction, the cells will be at least 2\n #interaction cells apart in the direction of the interaction.\n if(direction[0]):\n if numpy.abs(posInX1 - posInX2)>1:\n #print \"false\"\n return False\n if(direction[1]):\n if numpy.abs(posInY1 - posInY2)>1:\n #print \"false\"\n return False\n if(direction[2]):\n if numpy.abs(posInZ1 - posInZ2)>1:\n #print \"false\"\n return False\n print #\"true\"\n return True\n\n #Old (incorrect) method:\n if 0:\r\n print \"?valid bond: \", allAtoms[index1].pos, \" , \", allAtoms[index2].pos, direction\r\n cell1 = index1/numAtomsPerCell\r\n cell2 = index2/numAtomsPerCell\r\n zRow1 = cell1/size#this relies on the list being created in the nested for loop that was used, z within y within x\r\n zRow2 = cell2/size\r\n if(zRow1 != zRow2 and direction[2]):\n print \"false\"\r\n return False\r\n xLayer1 = cell1/(size*size)\r\n xLayer2 = cell2/(size*size)\r\n if(xLayer1 != xLayer2 and direction[1]):\n print \"false\"\r\n return False\r\n #shouldn't have to check z, because if it's not valid in z direction, it would be off the list (>len(allAtoms))\n print \"true\"\r\n return True", "def _is_range(cls, rng):\n match = re.search(\"([0-9][1-9]*)-([0-9][1-9]*)\", rng)\n # Group is a singular value.\n return match is not None", "def check_interval_bounds(begin, end):\n if begin.get_midpoint() >= end.get_midpoint():\n return False\n\n if begin.get_radius() is not None and end.get_radius() is not None:\n if begin.get_midpoint() - begin.get_radius() > \\\n end.get_midpoint() - end.get_radius():\n return False\n\n return True", "def isUndefinedRange(program: ghidra.program.model.listing.Program, startAddress: ghidra.program.model.address.Address, endAddress: ghidra.program.model.address.Address) -> bool:\n ...", "def range_match(verifield, ranges):\n return verifield[0] >= ranges[0][0] and verifield[0] <= ranges[0][1] and verifield[1] >= ranges[1][0] and verifield[1] <= ranges[1][1]", "def is_pair(pair):\n return isa(pair, Pair) or is_list(pair)", "def has_compatible_ligands(self, identity):\n return ((len(self.bad_coords[identity]) == 0) and\n (not self.BAD_COORD_RESIDUE in self.inaccuracies[identity]))", "def _is_single_range(r):\n return (isinstance(r, numbers.Integral) or\n (isinstance(r, 
collections.Sequence) and (len(r) == 2) and\n _is_range_boundary(r[0]) and _is_range_boundary(r[1])))", "def check_types(begin, end):\n try:\n begin.get_midpoint()\n end.get_midpoint()\n except AttributeError:\n return False\n\n return isinstance(begin.get_midpoint(), type(end.get_midpoint()))", "def bond_checker(atom, dict, bond_dict):\n bound = []\n for item, values in dict.items():\n bond_range = check_bond_len(bond_dict, atom[0], values[\"element\"]) + 0.2\n if distance_checker(atom[1:], values[\"coor\"]) <= bond_range:\n bound.append(item)\n return bound", "def validRange(line):\n line_col = str.split(line)\n chrom = line_col[0]\n pos = line_col[1]\n# any(lower <= postcode <= upper for (lower, upper) in [(1000, 2249), (2555, 2574), ...])\n if any(float(low) <= float(pos) <= float(high) for (low,high) in TE_ranges[chrom]):\n return False\n return True", "def _check_range(range_):\n try:\n if not isinstance(range_, list):\n range_ = list(range_)\n min_, max_ = range_\n except (ValueError, TypeError):\n raise TypeError(\"each range in ising_linear_ranges should be a list of length 2.\")\n if not isinstance(min_, Number) or not isinstance(max_, Number) or min_ > max_:\n raise ValueError((\"each range in ising_linear_ranges should be a 2-tuple \"\n \"(min, max) where min <= max\"))\n return range_", "def _validate_structure(self, mapping):\n #Call _compare_structure according to comparison_style\n if self._comparison_style == ComparisonStyle.minimum:\n return MappingValidator._compare_structure(mapping, self._reference)\n elif self._comparison_style == ComparisonStyle.maximum:\n return MappingValidator._compare_structure(self._reference, mapping)\n else:\n return MappingValidator._compare_structure(mapping, self._reference) \\\n and MappingValidator._compare_structure(self._reference,\n mapping)", "def _check_range(r): \n if _is_single_range(r):\n _check_one_range(r)\n elif isinstance(r, collections.Sequence):\n for r2 in r:\n _check_one_range(r2)\n else:\n raise error.RangeSyntaxError(str(r))", "def is_pair_allowed(a, b):\n if a == complementary(b):\n return True\n if a == 'G' and b == 'U' or a == 'U' and b == 'G':\n return True\n return False", "def check_bp(self):\n return self.min_basepairs <= self.seqdata.basepairs <= self.max_basepairs", "def is_between(value, start, end, including_start=False, including_end=False):\n if not including_start and not including_end: # not include both start and end\n if (start < value < end):\n return True\n elif (start > end) and (start < value <= (2**m - 1) or 0 <= value < end):\n return True\n elif (start == end) and (value != start):\n return True\n return False\n elif not including_start and including_end: # include end but not the start\n if value == end:\n return True\n elif (start < value <= end):\n return True\n elif (start > end) and ((start < value <= (2**m - 1)) or (0 <= value <= end)):\n return True\n elif (start == end) and (value != start):\n return True\n return False\n elif including_start and not including_end: # include start but not the end\n if value == start:\n return True\n elif (start <= value < end):\n return True\n elif (start > end) and (start <= value <= (2**m - 1) or 0 <= value < end):\n return True\n elif (start == end) and (value != end):\n return False\n return False\n else: # include both start and end\n if (start <= value <= end):\n return True\n elif (start > end) and (start <= value <= (2**m - 1) or 0 <= value <= end):\n return True\n elif start == end:\n return True\n return False", "def _overlapping(self, atom1, 
atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def f_has_range(self):\n raise NotImplementedError(\"Should have implemented this.\")", "def hasConflicts(self):\n partners = {}\n for first, second in self:\n #print >>sys.stderr, \"first:\", first, \"second:\", second\n if first is None:\n if second is None:\n continue #no pairing info\n else:\n first, second = second, first #swap order so None is 2nd\n if second is None: #check first isn't paired\n if partners.get(first, None) is not None:\n print >>sys.stderr, \"here1\"\n print >>sys.stderr, \"first:\", first, \"second:\", second\n return True\n else:\n partners[first] = None\n else: #first and second were both non-empty: check partners\n if first in partners:\n if partners[first] != second:\n print >>sys.stderr, \"here2\"\n print >>sys.stderr, \"first:\", first, \"second:\", second, \"partners[first]\", partners[first]\n print \"partners:\", partners\n return True\n if second in partners:\n if partners[second] != first:\n print >>sys.stderr, \"here3\"\n print >>sys.stderr, \"first:\", first, \"second:\", second, \"partners[second]:\", partners[second]\n return True\n #add current pair to the list of constraints\n partners[first] = second\n partners[second] = first\n #can only get here if there weren't conflicts\n return False", "def is_valid_block(self, first):\n return (self.a_cursor > first.a and\n self.b_cursor > first.b)", "def is_proper(i0, i1, i2, i3, bond_set):\n if (i0, i1) in bond_set and (i1, i2) in bond_set and (i2, i3) in bond_set and len(set([i0, i1, i2, i3])) == 4:\n return True\n return False", "def _is_range_boundary(boundary):\n return (isinstance(boundary, numbers.Integral) or\n (_is_string(boundary) and (boundary.lower() in ('min','max'))))", "def does_overlap(self, start, stop):\n\n ranges = [list(range(key, self.map[key] + 1)) for key in self.map]\n all_coords = [item for sublist in ranges for item in sublist]\n # removing all_coords implementation until we write some tests\n for i in range(start, stop + 1):\n if i in all_coords:\n return True\n return False", "def _range_contains(self, a, b):\n\t\treturn b[0] >= a[0] and b[-1] <= a[-1]", "def check_consistent(self):\n # * END LIST The end list itself must be consistent.\n # ** Each end must be of understood type\n # ** Each end must have a valid sequence or no sequence\n # ** There must be no more than one instance of each name\n # ** WARN if there are ends with no namecounts\n # * TILE LIST\n # ** each tile must be of understood type (must parse)\n # ** ends in the tile list must be consistent (must merge)\n # ** there must be no more than one tile with each name\n # self.tiles.check_consistent()\n endsfromtiles = self.tiles.glues_from_tiles()\n\n # ** WARN if any end that appears does not have a complement used or vice versa\n # ** WARN if there are tiles with no name\n # * TILE + END\n # ** The tile and end lists must merge validly\n # (checks sequences, adjacents, types, complements)\n self.glues | endsfromtiles\n\n # ** WARN if tilelist has end references not in ends\n # ** WARN if merge is not equal to the endlist\n # ** WARN if endlist has ends not used in tilelist\n # * ADAPTERS / SEEDS\n # SEED stuff was here", "def overlaps(self, chrom, start, end, strand=None):\n if (self.chrom != chrom \n or min(self.end, end) - max(self.start, start) <= 0 \n or (strand is not None and self.strand != strand)): \n return False\n return True", "def present_in_slice(self, start, stop):\n return 
self.starts_before(start) and self.ends_after(stop - 1)", "def validate_pairs(pairs, historical_pairs):\n if pairs is None:\n return False\n for p in pairs:\n if p in historical_pairs:\n return False\n return True", "def check_ranges(ranges, value):\n for fromto in ranges:\n start, end = fromto.split('-')\n if int(value) in range(int(start), int(end) + 1):\n return True\n # else:\n # print('%s is not between %s and %s' % (value, start, end))\n return False", "def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2", "def requires_pairing(cls) -> bool:\n return False", "def __check_range(self, movement: int) -> bool:\n next_position = self.__find_next_position_in_degrees(movement)\n if next_position < self.lowest_element.position_in_degrees:\n return False\n if next_position > self.highest_element.position_in_degrees:\n return False\n return True", "def structure_worth_investigating(left_struc, right_struc):\n if type(left_struc) is not type(right_struc):\n return False\n if type(left_struc) in TERMINALS:\n return False\n if len(left_struc) == 0 or len(right_struc) == 0:\n return False\n return True", "def rangeCheck(reqtuple, pkgtuple):\n # we only ever get here if we have a versioned prco\n # nameonly shouldn't ever raise it\n #(reqn, reqf, (reqe, reqv, reqr)) = reqtuple\n (n, a, e, v, r) = pkgtuple\n return rangeCompare(reqtuple, (n, 'EQ', (e, v, r)))", "def _can_be_list(pair):\n assert(isa(pair, Pair))\n return str(pair).find(' . ') < 0", "def check_range(self, csvop, mtype, stype, flavor, pt, eta, discr):\n allowed_range = self.allowed[(csvop, mtype, stype, flavor)]\n\n eta = abs(eta)\n allowed = all([\n eta >= allowed_range['etaMin'], eta <= allowed_range['etaMax'],\n pt >= allowed_range['ptMin'], pt <= allowed_range['ptMax'],\n discr >= allowed_range['discrMin'], discr <= allowed_range['discrMax'],\n ])\n\n if not allowed and self.verbose>2:\n print 'pt %6.1f <? %6.1f <? %6.1f' % (allowed_range['ptMin'], pt, allowed_range['ptMax'])\n print 'eta %4.1f <? %4.1f <? %4.1f' % (allowed_range['etaMin'], eta, allowed_range['etaMax'])\n print 'discr %4.1f <? %4.1f <? 
%4.1f' % (allowed_range['discrMin'], discr, allowed_range['discrMax'])\n\n return allowed", "def bond_check(bond_distance,bond_min=0,bond_max=1.5): # we can define the default min and max in the def\n if bond_distance >bond_min and bond_distance<bond_max:\n return True\n else:\n return False", "def check_order(current, hit, overlap = 200):\n prev_model = current[-1][2:4]\n prev_strand = current[-1][-2]\n hit_model = hit[2:4]\n hit_strand = hit[-2]\n # make sure they are on the same strand\n if prev_strand != hit_strand:\n return False\n # check for sequential hits on + strand\n if prev_strand == '+' and (prev_model[1] - hit_model[0] >= overlap):\n return False\n # check for sequential hits on - strand\n if prev_strand == '-' and (hit_model[1] - prev_model[0] >= overlap):\n return False\n else:\n return True", "def chain_rangeValid(start, stop):\r\n for i in range(start, stop):\r\n chain = chain_153(i)\r\n if len(chain) > 1 or chain[0] == 153:\r\n for j in chain_153(i):\r\n print(j)", "def check_optional_range(specific=None, begin=None, end=None):\n if specific and (begin and end):\n raise ValueError('Cannot pass both a range and specific')\n\n if (begin and not end) or (end and not begin):\n raise ValueError(\"Must pass both begin and end for ranges\")", "def acceptsArgument(self):\n range = self.validateRange(self.range)\n return not(not(range[1]))", "def isMergeable(int1,int2):\n if set(int1.span)&set(int2.span) or int1.maxval+1==int2.minval:\n return True\n else:\n return False", "def overlaps(self, atom, check_up_to, get_all_overlapping_atoms=True):\n if (check_up_to == 0):\n return True, []\n distances = self.structure.get_distances(atom, [i for i in range(0, check_up_to)], mic=True)\n minimum_percentage_allowed = 0.99\n valid = True\n overlappingAtoms = []\n\n init_distance = self.Atoms[atom][\"radius\"]\n\n for i in range(0, check_up_to):\n if (i == atom):\n continue\n minimum_distance = init_distance + self.Atoms[i][\"radius\"]\n if (distances[i] / minimum_distance < minimum_percentage_allowed):\n overlappingAtoms.append([i, minimum_distance - distances[i]])\n #print(\"Minimum allowed: \" + str(minimum_distance) + \", dist: \" + str(distances[i]))\n valid = False\n if (not get_all_overlapping_atoms):\n break\n\n return valid, overlappingAtoms", "def IsBound(self) -> bool:", "def compute_pair_bounds(self, edges, pair):\n lower_bounds =[]\n upper_bounds = []\n for arc in edges:\n l_e = self.arc_info[arc][\"lower_bound\"]\n u_e = self.arc_info[arc][\"upper_bound\"]\n f_mij = self.compute_f_mij(arc, pair)\n lower_bounds.append(l_e - f_mij)\n upper_bounds.append(u_e - f_mij)\n lb = max(lower_bounds + [0])\n # in case no edges in here, make max of 5,000\n if len(upper_bounds) == 0:\n i = pair[0]\n j = pair[1]\n print(\"Path i ({}): {}\".format(i, self.paths[i]))\n print(\"Path j ({}): {}\".format(j, self.paths[j]))\n ub = min(upper_bounds + [5000])\n #print(\"lower bounds: {}\".format(lower_bounds))\n #print(\"upper bounds: {}\".format(upper_bounds))\n return(lb, ub)", "def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False", "def is_valid_pair(self, pair, exchange):\n pairs = self.ccxt.get_pairs(exchange)\n print(pairs)\n return pair in pairs", "def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - 
x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True", "def check_if_no_bond(atom1, atom2, bond_list, bond_generic):\n check = False\n for bond in bond_list:\n if ((bond.identity == get_bond_id(atom1, atom2)[0]) or (bond.identity == get_bond_id(atom1, atom2)[1]) and calculate_3D_distance_2_atoms(atom1, atom2) > 1.05 * bond.length):\n check = True\n for bond in bond_generic:\n if (((atom1.atom_name[0] + atom2.atom_name[0]) == bond.identity) or (atom2.atom_name[0] + atom1.atom_name[0] == bond.identity) and (calculate_3D_distance_2_atoms(atom1, atom2) > 1.05 * bond.length)):\n check = True \n return check", "def can_overlap(self):\n return False", "def is_organic(fragment):\n # TODO: Consider a different definition?\n # Could allow only H, C, N, O, S, P, F, Cl, Br, I\n for a in fragment.GetAtoms():\n if a.GetAtomicNum() == 6:\n return True\n return False", "def contains(self, mention):\n return self.start <= mention.start and mention.end <= self.end", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def is_minpair(first, second, corpus_context, segment_pairs, environment_filter):\n first = getattr(first, corpus_context.sequence_type)\n second = getattr(second, corpus_context.sequence_type)\n\n if len(first) != len(second):\n return False\n\n has_difference = False\n for i in range(len(first)):\n if first[i] == second[i]:\n continue\n elif (conflateable(first[i], second[i], segment_pairs)\n and fits_environment(first, second, i, environment_filter)):\n has_difference = True\n continue\n else:\n return False\n\n if has_difference:\n return True", "def in_range(x, y):\n if (x < 0 or x > width or y < 0 or y > length):\n return False\n else:\n return True", "def _check_separate(gti0, gti1):\n gti0_start = gti0[:, 0]\n gti0_end = gti0[:, 1]\n gti1_start = gti1[:, 0]\n gti1_end = gti1[:, 1]\n\n if (gti0_end[-1] <= gti1_start[0]) or (gti1_end[-1] <= gti0_start[0]):\n return True\n\n for g in gti1.flatten():\n for g0, g1 in zip(gti0[:, 0], gti0[:, 1]):\n if (g <= g1) and (g >= g0):\n return False\n for g in gti0.flatten():\n for g0, g1 in zip(gti1[:, 0], gti1[:, 1]):\n if (g <= g1) and (g >= g0):\n return False\n return True", "def _check_one_range(r):\n if not _is_single_range(r):\n raise error.RangeSyntaxError(str(r))", "def ValidClusterRanges(self):\n for cluster_range in self.cluster_ranges:\n the_range = cluster_range.split(\"-\")\n print(f\"Checking that range {the_range} falls within our data area\")\n try:\n if int(the_range[0]) < self.low_data_cluster or int(the_range[1]) > self.high_data_cluster:\n print(f\"False. {the_range[0]} or {the_range[1]} is outside of our data area\")\n return False\n except TypeError as t_err:\n print(f\"Error. 
Range does not appear to be an int\")\n return False\n return True", "def has_bounds(self):\r\n bounds = self.bounds\r\n if bounds in (None, [None, None]):\r\n return False\r\n for i in xrange(bounds[0]):\r\n if bounds[0][i] is not None and bounds[0][i] > -np.inf:\r\n return True\r\n for i in xrange(bounds[1]):\r\n if bounds[1][i] is not None and bounds[1][i] < np.inf:\r\n return True\r\n return False", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def _inside_bounds(A, B):\n for axis in 'xyz':\n minA, maxA = axis_bounds(A, axis)\n minB, maxB = axis_bounds(B, axis)\n if (minA <= minB) or (maxA >= maxB):\n return False\n\n return True", "def IsInRange(self, id, start, isStartInclusive, end, isEndInclusive):\r\n if isStartInclusive == False:\r\n start = (start + 1) % NODES\r\n if isEndInclusive == True:\r\n end = (end + 1) % NODES\r\n allRanges = []\r\n if(start < end):\r\n allRanges.append(range(start, end))\r\n else:\r\n allRanges.append(range(start, NODES))\r\n allRanges.append(range(0, end))\r\n for r in allRanges:\r\n if id in r:\r\n return True\r\n return False", "def eh_tabuleiro(tab):\r\n if not type(tab)==tuple:\r\n return False\r\n if len(tab)==3:\r\n for linha in tab:\r\n if not type(linha)==tuple:\r\n return False\r\n if len(linha)==3:\r\n for num in linha:\r\n if not (num in [-1,0,1] and type(num)==int):\r\n return False\r\n else:\r\n return False\r\n else:\r\n return False\r\n return True", "def is_valid_single_attempt(self, atoms_init, atoms_final):\n from scipy.spatial import cKDTree as KDTree\n from random import shuffle\n atoms1 = atoms_init.copy()\n atoms2 = atoms_final.copy()\n\n vol1 = atoms1.get_volume()\n vol2 = atoms2.get_volume()\n if vol2 > vol1:\n ratio = (vol2/vol1)**(1.0/3.0)\n cell1 = atoms1.get_cell()\n atoms1.set_cell(cell1*ratio, scale_atoms=True)\n else:\n ratio = (vol1/vol2)**(1.0/3.0)\n cell2 = atoms2.get_cell()\n atoms2.set_cell(cell2*ratio, scale_atoms=True)\n\n # Try construct the relation\n used_indices = []\n tree = KDTree(atoms2.get_positions())\n indices = list(range(0, len(atoms1)))\n shuffle(indices)\n for atom in atoms1:\n if atom.symbol in self.exclude:\n continue\n dist, closest = tree.query(atom.position, k=12)\n srt_indx = np.argsort(dist)\n dist = [dist[indx] for indx in srt_indx]\n closest = [closest[indx] for indx in srt_indx]\n\n if all(c in used_indices for c in closest):\n # More than one atom is closest to this\n # structure\n self.rejected_reason = \"More than one atom mapped onto the \"\n self.rejected_reason += \"same atoms in the initial structure\"\n return False\n\n # First, unused with mathing symbol\n closest_indx = None\n closest_dist = None\n for i, indx in enumerate(closest):\n if atoms2[indx].symbol == atom.symbol and indx not in used_indices:\n closest_indx = indx\n closest_dist = dist[i]\n break\n\n if closest_indx is None:\n self.rejected_reason = \"No unused atoms with macthing symbol!\"\n return False\n \n used_indices.append(closest_indx)\n if closest_dist > self.max_displacement:\n # The displacement is larger than the tolereance\n self.rejected_reason = \"Max displacement too large\"\n return False\n \n if atom.symbol != atoms2[closest_indx].symbol:\n self.rejected_reason = \"Mapped symbol does not match!\"\n return False\n return True", "def _check_market_place_in_range(self):\n\t\tfor building in self.get_buildings_in_range():\n\t\t\tif building.id == BUILDINGS.MARKET_PLACE_CLASS:\n\t\t\t\tif StaticPather.get_path_on_roads(self.island, self, building) is not 
None:\n\t\t\t\t\t# a market place is in range\n\t\t\t\t\treturn\n\t\t# no market place found\n\t\tself.session.ingame_gui.message_widget.add(self.position.origin.x, self.position.origin.y, \\\n\t\t 'NO_MARKET_PLACE_IN_RANGE')", "def __pair_maximizer(alpha_pairs, pair):\n for alt in alpha_pairs:\n if pair != alt and pair[0].issubset(alt[0]) and pair[1].issubset(alt[1]):\n return False\n return True", "def overlap(range1, range2):\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False", "def check_for_residue_existance(self):\n if not self.pose.total_residue():\n print \"No pose Loaded.\"\n return False\n\n if not self.current_chain.get() or not self.current_residue.get():\n print \"Chain or residue not set\"\n return False\n\n current_region = self.current_residue.get().split(\":\")\n if len(current_region)>1:\n ResStart = int(current_region[0]); ResEnd = int(self.current_region[1])\n\n\n if self.pose.pdb_info().pdb2pose(self.current_chain.get(), ResStart)==0 or self.pose.pdb_info().pdb2pose(self.current_chain.get(), ResEnd)==0:\n print \"Region not found in pose\"\n return False\n else:\n if self.pose.pdb_info().pdb2pose(self.current_chain.get(), int(self.current_residue.get())) ==0:\n\n print \"Residue not found in pose\"\n return False\n\n #If everythig is good then:\n return True", "def overlap(p1: Tuple, p2: Tuple) -> bool:\n if (p2[1] - p1[0]) * (p2[0] - p1[1]) <= 0:\n return True\n else:\n return False", "def isValidPair(self,s1,s2):\n if (s1 == '(' and s2 == ')'):\n return True\n if (s1 == '[' and s2 == ']'):\n return True\n if (s1 == '{' and s2 == '}'):\n return True\n return False", "def is_complete_multipartite(self):\n if self._.d != 2:\n return False\n if not self._has(\"p\"):\n self.pTable()\n return any(self._.p[0, i, i] == self._.p[j, i, i]\n for i, j in [(1, 2), (2, 1)])", "def is_valid_descriptor(key):\n return key in Bounds.standard_bound_names or Bounds.parse_pct(key) is not None", "def _should_be_pair(s_list):\n assert(isa(s_list, List))\n return str(s_list).find(' . 
') > 0", "def __check(self):\n if len(self._data)!=len(self._ptbins)+1: \n raise IndexError('Pt bins mismatch')\n for ptbin in self._data:\n if len(ptbin)!=len(self._etabins)+1:\n raise IndexError('Eta bins mismatch')", "def is_bound(self, point):\n return self.__begin == point or self.__end == point", "def _is_well_formed(l):\n\tif _is_symbol(l):\n\t\treturn 1\n\tif (type(l) == types.TupleType and len(l) == 2\n\t\t\tand l[0] == neg and _is_well_formed(l[1])):\n\t\treturn 1\n\tif (type(l) == types.TupleType and len(l) == 3\n\t\t\tand _is_binary(l[1])\n\t\t\tand _is_well_formed(l[0]) and _is_well_formed(l[2])):\n\t\treturn 1\n\treturn 0", "def check_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... no data?\")\n pass", "def test_simsam_range_functions_without_mapping_file(self):\r\n actual = qiime.simsam.simsam_range(\r\n self.tutorial_otu_table,\r\n self.tutorial_tree,\r\n [1],\r\n [0.1])\r\n self.assertEqual(len(list(actual)), 1)", "def check_for_overlapping_features(mRNA, gene_name):\n overlap_found = False\n all_feature_positions = []\n for feature in mRNA.sub_features:\n # BCBio uses 0-based and end-exclusive positions (first-third base is bases 0,1,2, i.e range 0-3), \n # so add 1 to start and keep end as is to convert to 1-based-end-inclusive\n all_feature_positions.append((feature.location.start.position+1, feature.location.end.position, feature.id))\n all_feature_positions.sort()\n for feature1_data,feature2_data in itertools.izip(all_feature_positions,all_feature_positions[1:]):\n (_,feature1_end,feature1_name), (feature2_start,feature2_end,feature2_name) = feature1_data, feature2_data\n if feature1_end>=feature2_start:\n overlap_found = True\n # check for \"feature1 contains feature2\", print a warning, since it can make other things not work right\n if feature1_end>=feature2_end:\n print(\"WARNING: feature %s is completely inside feature %s in gene %s!\"%(feature1_name,feature2_name,gene_name)\n +\" There may be additional undetected overlaps downstream.\")\n return overlap_found", "def _molge(x, y):\n if x is None or y is None:\n return False\n if hasattr(x, '_substructfp'):\n if not hasattr(y, '_substructfp'):\n y._substructfp = _fingerprinter(y, True)\n if not DataStructs.AllProbeBitsMatch(y._substructfp, x._substructfp):\n return False\n match = x.GetSubstructMatch(y)\n x.__sssAtoms = []\n if match:\n if highlightSubstructures:\n x.__sssAtoms = list(match)\n return True\n else:\n return False", "def _only_one_type(self):\n num_larger_than_1 = 0\n for symb, indices in self.atoms_indx.items():\n if len(indices) > 0:\n num_larger_than_1 += 1\n return num_larger_than_1 <= 1", "def verify_cutoff_pair(cutoff, pair, voltages):\n flag = 
-1\n for i in range(len(cutoff)):\n pairs = cutoff[i]\n if pair in pairs or list(reversed(pair)) in pairs:\n if voltages[i] is 'S':\n flag = i\n return flag\n return flag", "def validate_position(position: Tuple[int, int], bound: int) -> bool:\n if position[0] < 0 or position[0] >= bound:\n return False\n if position[1] < 0 or position[1] >= bound:\n return False\n return True", "def is_valid(self) -> bool:\n if self.total <= 1:\n # Definitely valid (i.e. no conflict) if 0 or 1. In practice, this\n # function probably won't be called if there are 0 fixes, but 0 is\n # valid; it simply means \"no fixes to apply\".\n return True\n if self.total == 2:\n # This is only OK for this special case. We allow this because\n # the intent is clear (i.e. no conflict): Insert something *before*\n # the segment and something else *after* the segment.\n return self.create_before == 1 and self.create_after == 1\n # Definitely bad if > 2.\n return False # pragma: no cover", "def is_union(self) -> bool:\n return False", "def _in_range_op(spec):", "def almost_there(n):\n # Get the absolute value of n\n n = abs(n)\n # Create the ranges to test for n\n hundi_range = list(range(90, 111))\n two_hundi_range = list(range(190, 211))\n # see if n is in any of the ranges\n return n in hundi_range or n in two_hundi_range", "def inrange(cc, point):\n return point.row in range(cc.top, cc.bottom+1) and point.col in range(cc.left, cc.right+1)", "def is_bound(pos1, el1, pos2, el2):\n threshold = 0.1\n if el1 == 'H' or el2 == 'H':\n threshold = 0.2\n if np.linalg.norm(np.array(pos1) - np.array(pos2)) < covalence_radius[el1] + covalence_radius[el2] + threshold:\n return True\n return False", "def check_for_overlapping_genes(sequence_record):\n overlapping_gene_pairs = []\n all_gene_positions = []\n for gene in sequence_record.features:\n # BCBio uses 0-based and end-exclusive positions (first-third base is bases 0,1,2, i.e range 0-3), \n # so add 1 to start and keep end as is to convert to 1-based-end-inclusive\n all_gene_positions.append((gene.location.start.position+1, gene.location.end.position, gene.id))\n all_gene_positions.sort()\n for gene1_data,gene2_data in itertools.izip(all_gene_positions,all_gene_positions[1:]):\n (gene1_start,gene1_end,gene1_name), (gene2_start,gene2_end,gene2_name) = gene1_data, gene2_data\n if gene1_end>=gene2_start:\n overlapping_gene_pairs.append((gene1_name,gene2_name))\n # check for \"gene1 contains gene2\", print a warning, since it can make other things not work right\n if gene1_end>=gene2_end:\n print(\"WARNING: gene %s is completely inside gene %s! \"%(gene1_name, gene2_name)\n +\"Various gene-position-related results may be inaccurate.\")\n return overlapping_gene_pairs\n # MAYBE-TODO rewrite it so it actually detects ALL overlaps? Right now if gene A contains nonoverlapping genes B and C, it'll sort them as (A,B,C) since A starts first, so it'll detect the (A,B) overlap, but it won't detect the (A,C) overlap because it doesn't CHECK (A,C), only (A,B) and (B,C). This could be fixed either by just brute-force checking all gene pairs (and then using DNA_basic_utilities.position_test_overlap), or by writing something prettier. 
In any case, not a priority, since generally genes DON'T OVERLAP AT ALL.", "def is_union(self):\n return False", "def _find_if_half_int(indics, sums_dict, resolvers):\n\n for indic in indics:\n if isinstance(indic, (JOf, MOf)):\n # j component is always considered half integer for nuclear\n # problems.\n #\n # TODO: Make this behaviour configurable.\n return True\n else:\n range_ = try_resolve_range(indic, sums_dict, resolvers)\n if range_ is None or not range_.bounded:\n continue\n\n lower = range_.lower\n if lower.is_integer:\n return False\n elif (2 * lower).is_integer:\n return True\n\n continue\n\n # After all indicators have been tried.\n return None", "def _validate_data_range(\n self, data_range: defaultdict, task_names: Set[str]\n ) -> bool:\n\n if not isinstance(data_range, (dict, defaultdict)):\n raise TypeError(f\"Invalid data range type - Must be a dictionary\")\n elif not set(data_range.keys()).issuperset(task_names):\n raise KeyError(f\"Data range not defined for all tasks: {task_names}\")\n elif False in [key.keys() >= {\"min\", \"max\"} for key in data_range.values()]:\n raise KeyError(f\"Missing required fields: min and max\")\n else:\n return True", "def overlaps(self, region):\n region = as_region(region)\n\n if region.chromosome != self.chromosome:\n return False\n\n if self.end is None or region.start is None or region.start <= self.end:\n if self.start is None or region.end is None or region.end >= self.start:\n return True\n return False", "def chkBD(pos, m):\n cur = False\n for x in [-1, 1]:\n if(pos[0] + x >= 0 and pos[0] + x < m['r_len']):\n cur = cur or m['ver'][pos[0]+x][pos[1]] == 2\n if(pos[1] + x >= 0 and pos[1] + x < m['c_len']):\n cur = cur or m['hor'][pos[0]][pos[1]+x] == 2\n\n return cur", "def isValid(self, start, end):\n for s in self.skip:\n if start <= s[0] <= end or start <= s[1] <= end:\n return False\n return True", "def is_span_valid(self)->bool:\n if self.get_start_offset() < 0 or self.get_end_offset() < 0:\n logger.error(\"Start and end of position of the fragment must be non-negative: %d, %d\"\n %(self.get_start_offset(), self.get_end_offset()))\n return False\n if self.get_start_offset() >= self.get_end_offset():\n logger.error(\"End position of the fragment must be greater than the starting one: start=%d, end=%d\"%(self.get_start_offset(), self.get_end_offset()))\n return False\n return True" ]
[ "0.61594635", "0.59847325", "0.58420426", "0.58221036", "0.58218324", "0.5720427", "0.5718489", "0.57073534", "0.5707279", "0.568753", "0.5665801", "0.5664754", "0.56299406", "0.5600717", "0.55935943", "0.55612284", "0.55557734", "0.5544614", "0.5533591", "0.5522513", "0.55161816", "0.54850316", "0.54530406", "0.54183024", "0.5409831", "0.5409013", "0.5395864", "0.5363896", "0.53618425", "0.5360566", "0.5352373", "0.5351778", "0.53449583", "0.5334991", "0.53167814", "0.53076416", "0.5304512", "0.5303538", "0.5299887", "0.529673", "0.5270952", "0.5270238", "0.52667916", "0.5254681", "0.52408934", "0.52386385", "0.52312815", "0.52139986", "0.5210858", "0.520782", "0.52068967", "0.5204813", "0.5195408", "0.5195122", "0.5194577", "0.5193209", "0.5186137", "0.5181614", "0.51776576", "0.5172347", "0.5169533", "0.51692325", "0.51540744", "0.51506126", "0.5137038", "0.51324844", "0.5128333", "0.5126324", "0.51210606", "0.51123995", "0.51118106", "0.5091524", "0.5087841", "0.5084239", "0.50803757", "0.50763106", "0.50715053", "0.50647956", "0.5064246", "0.50575763", "0.50569594", "0.5055849", "0.5053222", "0.5049823", "0.5047579", "0.5046762", "0.50462735", "0.50416327", "0.5039353", "0.5036415", "0.50336075", "0.5031073", "0.50171024", "0.50167936", "0.5016673", "0.5015702", "0.5013723", "0.50116444", "0.5004344", "0.50021863", "0.50021154" ]
0.0
-1
Counts the number of collisions among all bonds. Can be used to estimate how 'wrong' the depiction is.
def count_bond_collisions(self):
    errors = 0
    for i in range(0, len(self.bonds)):
        for a in range(i + 1, len(self.bonds)):
            result = self._intersection(self.bonds[i], self.bonds[a])
            if result:
                errors += 1
    return errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_bonds(self):\n n = 0\n for bond in self.iter_bonds():\n n += 1\n return n", "def count_bonds(self):\n n = 0\n for bond in self.iter_bonds():\n n += 1\n return n", "def get_collisions(self) -> int:\n if len(self.obstacles) == 0:\n return 0\n collision_list = [ob.detect_collision(self.agent)\n for ob in self.obstacles]\n return sum(collision_list)", "def get_collisions(self) -> int:\n if len(self.obstacles) == 0:\n return 0\n collision_list = [ob.detect_collision(self.agent)\n for ob in self.obstacles]\n\n return sum(collision_list)", "def get_collisions(self) -> int:\n c = 0\n for o in self.obstacles:\n if not isinstance(o, Bomb):\n continue # only consider apples\n xy_diff = o.get_position()[:2] - self.agent.get_position()[:2]\n dist = np.linalg.norm(xy_diff)\n # obstacles are only active when they are visible...\n if o.is_visible and dist < self.detection_distance:\n o.update_visuals(make_visible=False)\n c += 1\n return c", "def get_collisions(self) -> int:\n if len(self.obstacles) == 0:\n collision_list = []\n else:\n collision_list = [ob.detect_collision(self.agent)\n for ob in self.obstacles]\n return sum(collision_list)", "def __numHeads(self):\n count = 1\n\n while (self.__coinFlip() == 1):\n count += 1\n return count", "def get_number_of_bulls(self):\n list_of_bulls = [i for i, j in zip(self.puzzle, self.guess) if i == j]\n bulls = len(list_of_bulls)\n return bulls", "def count_balls(self, **kwargs):\n return 0", "def get_total_collisions(self):\n return self.count_collisions", "def number_of_containing_bags(self) -> int:\n\n bag_count = 0\n for sub_bag_count, sub_bag_color in self.containing_bags:\n bag_count += sub_bag_count\n bag_count += (\n sub_bag_count * bag_registry[sub_bag_color].number_of_containing_bags\n )\n return bag_count", "def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count", "def nClumps(self):\n \n return len(self)", "def total_num_bonds(self):\n return self.GetNumberOfBonds()", "def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)", "def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)", "def lives_counter(self):\n count = 15\n for row in self.board:\n for column in row:\n if column == HITSHIP:\n count -= 1\n self.lives = count\n return self.lives", "def depiction_score(self):\n\n collision_penalty = 1\n degenerated_penalty = 0.4\n\n bond_collisions = self.count_bond_collisions()\n degenerated_atoms = self.count_suboptimal_atom_positions(0.0, 0.5)\n\n score = (\n collision_penalty * bond_collisions\n + degenerated_penalty * degenerated_atoms\n )\n\n return round(score, 1)", "def ball_num(self):\n counter = 0\n for i in range(0, 100):\n if self.cells[i].is_ball:\n counter += 1\n return int(counter)", "def get_destroyed_ships_count(self):\n destroyed_ships_count = 0\n for row_index in range(self.rows):\n for column_index in range(self.columns):\n cell = self.grid[row_index][column_index]\n if cell.has_destroyed_ship():\n destroyed_ships_count += 1\n\n return destroyed_ships_count", "def heavy_count(mol,idxs):\n count = 0\n for num, bonds in enumerate(mol.GetBonds()):\n if mol.GetBondWithIdx(num).GetBeginAtomIdx() == idxs:\n if mol.GetAtomWithIdx(mol.GetBondWithIdx(num).GetEndAtomIdx()).GetSymbol() != 'H':\n count += 1\n elif mol.GetBondWithIdx(num).GetEndAtomIdx() == idxs:\n if 
mol.GetAtomWithIdx(mol.GetBondWithIdx(num).GetBeginAtomIdx()).GetSymbol() != 'H':\n count += 1\n return count", "def countEdges(self):\n n = 0\n for (hub, table) in self.totsupport.iteritems():\n n += len(table)\n return n", "def obstacle_count(self):\n #scan area in front of robot\n self.scan()\n #Figure ot how many obstacles there were\n see_an_object = False\n count = 0", "def buses_count(self):\n\n count = 0\n for line in self.__bus_dict.values():\n # for item in buses:\n count += len(line)\n return count", "def count_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_atoms()\n return n", "def count_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_atoms()\n return n", "def count_liberties(self, x, y):\n return len(self.get_liberties(x, y))", "def count_all_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_all_atoms()\n return n", "def no_locked_budgets(self) -> int:\n count = 0\n for budget in self.budgets.values():\n if budget.locked:\n count += 1\n return count", "def get_number_of_cheeses(self):\n number = 0\n for i in range(len(self._stools)):\n number += len(self._stools[i])\n return number", "def length(self):\n # Loop through all buckets\n # Count number of key-value entries in each bucket\n\n # could be done with 1 line with comprehension\n # return sum(bucket.length() for bucket in self.buckets)\n\n total_entries = 0\n\n for linked_list in self.buckets:\n total_entries += linked_list.length()\n\n return total_entries", "def get_num_hit(boxes_truth, boxes_pred, is_hit):\n out = 0\n for tbox in boxes_truth:\n for pbox in boxes_pred:\n if is_hit(tbox, pbox):\n out += 1\n return out", "def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])", "def get_number_of_cows(self):\n bulls = self.get_number_of_bulls()\n list_of_cows = set(self.puzzle) & set(self.guess)\n cows = (len(list_of_cows) - bulls)\n return cows", "def orbit_count(objects: Dict[str, ObjectMass]) -> int:\n total = 0\n\n for mass in objects.values():\n total += mass.orbit_count()\n\n return total", "def break_count(self):\n return len(self.link_ids) + len(self.crossring_cleavages)", "def numberOfBoomerangsSlow(self, points):\n\n def is_boomerang(i, j, k):\n dist_a = pow(j[0] - i[0], 2) + pow(j[1] - i[1], 2)\n dist_b = pow(k[0] - i[0], 2) + pow(k[1] - i[1], 2)\n return dist_a == dist_b\n\n total = 0\n for i in points:\n for j in points:\n for k in points:\n if i != j and j != k and is_boomerang(i, j, k):\n total += 1\n return total", "def getPerCgidNumMarkedForDeletion (self):\n total = dict()\n for cid,cgid in self._cidsToBeMarkedBad.iteritems():\n if cgid not in total:\n total[cgid]=0\n num=total[cgid]\n total[cgid]=num+1\n return total", "def get_collisions(self) -> int:\n return 0 # no obstacles are spawned for Circle tasks", "def hits(self):\n return len(self.successes) + len(self.failures)", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def num_species_on_map(self):\n # tot_herbivores = 0\n # tot_carnivores = 0\n # for cells in itertools.chain.from_iterable(self.map):\n # curr_herbivore, curr_carnivore = cells.num_species_per_cell()\n # tot_herbivores += curr_herbivore\n # tot_carnivores += curr_carnivore\n\n return (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))\n\n # (sum(x) for x in zip(*[cells.num_species_per_cell() for cells 
in itertools.chain.from_iterable(self.map)]))", "def num_empty(self):\n count = 0\n for i in self.__buckets:\n if i.size() == 0:\n count += 1\n return count", "def num_of_orbits(orbitDict):\n\n total_orbits = len(orbitDict.keys())\n total_orbits += independent_orbits(orbitDict)\n\n return total_orbits", "def get_num_obstacles(coord_a, coord_b):\n obstacle_count = 0\n \n for x in range(coord_a.x, coord_b.x + 1):\n for y in range(coord_a.y, coord_b.y + 1):\n coord = Coordinate(x, y)\n if coord in self.wall_coords or coord in state:\n obstacle_count += 1\n \n return obstacle_count", "def get_collision_score(self, thetas):\n\n collision_score = 0\n\n for n in range(len(self.obstacles)):\n for i in range(len(self.arm_lengths)):\n if self.get_links(thetas)[i].check_wall_collision(self.obstacles[n]):\n collision_score += 1\n\n return -collision_score", "def number_of_beds(self):\n return self._number_of_beds", "def number_bites_accessed(self) -> int:\r\n accessed_bites = {\r\n row['bite']\r\n for row in self.rows\r\n }\r\n\r\n return len(accessed_bites)", "def count(self):\n # TODO not implemented yet\n return 0", "def count_all_atoms(self):\n n = 0\n for atm in self.atom_order_list:\n if isinstance(atm, Atom):\n n += 1\n else:\n n += len(atm)\n return n", "def count(self):\n return len(self._components)", "def count_discs(self, player: Player) -> int:\n count = 0\n player_disc = disc.get_disc(player)\n for i in range(self.size):\n for j in range(self.size):\n if self._grid[i][j] == player_disc:\n count += 1\n return count", "def num_herbs(self):\n return self._num_herbs", "def count_winning_blocks(self, gameboard):\r\n count = {'red':0.1, 'blue':0.1}\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n h = gameboard.check_horizontal_state(position)\r\n v = gameboard.check_vertical_state(position)\r\n d1 = gameboard.check_diag_1_state(position)\r\n d2 = gameboard.check_diag_2_state(position)\r\n for state in [h, v, d1, d2]:\r\n if ((state.count('red') + state.count('x') == 5)\r\n and (state.count('red') > 0)):\r\n count['red'] += np.power(3, (state.count('red') - 1))\r\n elif ((state.count('blue') + state.count('x') == 5)\r\n and (state.count('blue') > 0)):\r\n count['blue'] += np.power(3, (state.count('blue') - 1))\r\n return count", "def recurse_bag_count(bag_map, current_bag):\n result = 0\n if bag_map.get(current_bag):\n current_bags = bag_map[current_bag]\n for bag_color, count in current_bags.items():\n result += count + count*recurse_bag_count(bag_map, bag_color)\n return result\n else:\n return 0", "def get_number_of_non_exhausted_ballots(self):\n return len(self._ballots) - len(self._exhausted_ballots)", "def _count_living_neighbors(self, cell: Cell) -> int:\n count = 0\n # borders of the area in which we are trying to find neighbors\n # Let's assume y axis directs downside and x axis directs to the left\n \n for x in range(cell.x - 1, cell.x + 2):\n for y in range(cell.y - 1, cell.y + 2):\n if cell.x == x and cell.y == y:\n continue\n if (x, y) in self.living_cells.keys():\n count += 1\n \n return count", "def fixation_count(self) -> int:\n return len([fix for fix in self.fixations if not fix.excluded])", "def total_height_blocks(validator):\n res = 0\n for bhash, b in validator.processed.items():\n if isinstance(b, Block):\n res += 1\n return res", "def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return 
count", "def ship_count(self):\r\n return sum(f.ship_count for f in self)", "def countComponents(self, n: int, edges: List[List[int]]) -> int:\n # BFS O_n time and space\n \n # union find ALG\n uf = UnionFind(n)\n \n for x, y in edges:\n uf.union(x, y)\n \n return len(set(uf.find(x) for x in range(n)))", "def hives_count(self) -> int:\n return self.hives.count()", "def atom_count(self):\n return len(self.repeated_elements())", "def total_cards(self):\n amount = 0\n for palo in self._cards:\n amount = amount + len(self._cards[palo])\n\n return amount", "def total_baryon_number(particles: list[Particle]) -> int:\n return sum(particle.baryon_number for particle in particles)", "def getHitCount(self): #$NON-NLS-1$\r", "def count_all_atoms(self):\n n = 0\n for model in self.iter_models():\n n += model.count_all_atoms()\n return n", "def number_of_herbivores_island(self):\n return np.sum(self.herbivores_on_island)", "def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)", "def number_of_bells(self) -> int:\n return self._tower.number_of_bells", "def bounced_member_count(self):\n return self._bounced_member_count", "def __len__(self):\n count = 0\n for recovery_set in self.recovery_sets.values():\n count += len(recovery_set.packets)\n return count", "def count_dead_node(self):\n count = 0\n for node in self.node:\n if node.energy < 0:\n count += 1\n return count", "def Hbond_donors(self):\n if not self.hasBonds:\n self.buildBondsByDistance()\n num_donors = 0\n for a in self.allAtoms:\n if a.element == 'H':\n num_donors += a.bonds[0].neighborAtom(a).element in ('O', 'N')\n return num_donors", "def count(self, syms = None ):\n if syms == None:\n syms = self.alpha.getSymbols()\n for sym in syms:\n idx = self.alpha.getIndex( sym )\n self.cnt[idx] += 1.0\n self.tot += 1", "def count(self):\n return len(self.deck)", "def get_number_of_attacked_fields(piece, inverted_piece_map, board):\n return len(board.attacks(inverted_piece_map[piece]))", "def count(self):\n return sum(1 for _ in self)", "def __len__(self):\n return len(self.qc_mol.atoms) + len(self.br_mol.atoms) + len(self.pc_mol.atoms)", "def count(self):\n return len([i for i in self.iteritems()])", "def carn_count(self):\n return len(self.carnivores)", "def __len__(self) -> int:\n num_batches, remainder = divmod(len(self.mapped_triples), self.batch_size)\n if remainder and not self.drop_last:\n num_batches += 1\n return num_batches", "def number_bites_resolved(self) -> int:\r\n resolved_bites = {\r\n row['bite']\r\n for row in self.rows\r\n if row['completed'] == 'True'\r\n }\r\n\r\n return len(resolved_bites)", "def obstacle_count(self):\n for x in range(6):\n # do a scan of the area in front of the robot\n self.scan()\n\n \n see_an_object = False\n count = 0 \n # Do a scan and count the amount of objects in the way\n for angle in self.scan_data:\n dist = self.scan_data[angle]\n if dist < self.SAFE_DISTANCE and not see_an_object: \n see_an_object = True\n count += 1\n print(\"~~~ I SEE SOMETHING!! 
~~~\")\n elif dist > self.SAFE_DISTANCE and see_an_object:\n see_an_object = False\n print(\"I guess the object ended\") \n print(\"ANGLE: %d | DIST: %d\" % (angle, dist))\n self.turn_by_deg(90)\n print(\"\\nI saw %d objects\" % count)", "def num_conll(self):\n pass", "def obstruction_fuse_counter(self) -> Counter[GriddedPerm]:\n if self._obstruction_fuse_counter is not None:\n return self._obstruction_fuse_counter\n obs = (ob for ob in self._tiling.obstructions if not self.is_crossing_len2(ob))\n fuse_counter = self._fuse_counter(obs)\n self._obstruction_fuse_counter = fuse_counter\n return self._obstruction_fuse_counter", "def get_bag_count(self):\n # b.get_bag_count() + 1 because get_bag_count does not count itself\n # A bag does not contain itself for our purposes.\n return sum([(b.get_bag_count() + 1) * n for b, n in self.bags])", "def num_zombies(self):\n return len(self._zombie_list)", "def num_zombies(self):\n return len(self._zombie_list)", "def crates_destroyed(self, game_state: dict):\n\n bomb_position_x = game_state['self'][3][0]\n bomb_position_y = game_state['self'][3][1]\n n_crates = 0\n\n for i in range(3):\n if bomb_position_x - i - 1 >= 0:\n if game_state['field'][bomb_position_x - i - 1][bomb_position_y] == 1:\n n_crates += 1\n elif game_state['field'][bomb_position_x - i - 1][bomb_position_y] == -1:\n break\n\n for i in range(3):\n if bomb_position_x + i + 1 <= 16:\n if game_state['field'][bomb_position_x + i + 1][bomb_position_y] == 1:\n n_crates += 1\n elif game_state['field'][bomb_position_x + i + 1][bomb_position_y] == -1:\n break\n\n for i in range(3):\n if bomb_position_y - i - 1 >= 0:\n if game_state['field'][bomb_position_x][bomb_position_y - i - 1] == 1:\n n_crates += 1\n elif game_state['field'][bomb_position_x][bomb_position_y - i - 1] == -1:\n break\n\n for i in range(3):\n if bomb_position_y + i + 1 <= 16:\n if game_state['field'][bomb_position_x][bomb_position_y + i + 1] == 1:\n n_crates += 1\n elif game_state['field'][bomb_position_x][bomb_position_y + i + 1] == -1:\n break\n\n return n_crates", "def herb_count(self):\n return len(self.herbivores)", "def count_alive_neighbors(self, status):\n kernel = np.array(\n [[1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n\n count = convolve2d(status, kernel, mode='same', boundary=\"wrap\")\n return count", "def _fuse_counter(\n self, gridded_perms: Iterable[GriddedPerm]\n ) -> Counter[GriddedPerm]:\n fuse_counter: Counter[GriddedPerm] = collections.Counter()\n for gp in gridded_perms:\n fused_perm = self.fuse_gridded_perm(gp)\n fuse_counter[fused_perm] += 1\n return fuse_counter", "def count():", "def getNumPlayers(self):\n return len(self.__colordict__.keys())", "def num_zombies(self):\r\n return len(self._zombie_list)", "def count_neighboors(self, x: int, y: int) -> int :\n\n cpt : int = 0\n min_x : int = max(0, x - 1)\n max_x : int = min(x + 1, self.width-1)\n min_y : int = max(0, y - 1)\n max_y : int = min(y + 1, self.height-1)\n\n x_tmp : int\n y_tmp : int\n for x_tmp in range(min_x, max_x+1):\n for y_tmp in range(min_y, max_y+1):\n if self.is_alive(x_tmp, y_tmp) and not (x_tmp == x and y_tmp == y):\n cpt += 1\n return cpt", "def total(self) -> int:\n return len(self.fixes)", "def n_cs(self):\n return np.size(self._cs, 0)" ]
[ "0.6844145", "0.6844145", "0.68411255", "0.68388706", "0.67286265", "0.66780823", "0.64739144", "0.62883806", "0.6247389", "0.62287146", "0.61868566", "0.617473", "0.6031441", "0.60012114", "0.5952148", "0.5952148", "0.5926391", "0.5919017", "0.5828075", "0.5818269", "0.58110774", "0.5802668", "0.57986635", "0.57904416", "0.5785508", "0.5785508", "0.57784915", "0.5745833", "0.5738232", "0.57300115", "0.5729718", "0.5709548", "0.5706891", "0.5679507", "0.56765515", "0.56680954", "0.56446", "0.5637301", "0.5627948", "0.5619012", "0.5612355", "0.5608553", "0.55930775", "0.55929977", "0.5586841", "0.55821025", "0.5573228", "0.5559599", "0.5557419", "0.5550731", "0.55457056", "0.55424863", "0.55291396", "0.5516464", "0.55132115", "0.550765", "0.5500156", "0.5494899", "0.5488374", "0.54826564", "0.5480385", "0.54777545", "0.5475259", "0.5474838", "0.54743725", "0.547104", "0.5470736", "0.5462222", "0.5453427", "0.5452413", "0.5451288", "0.5450492", "0.54473007", "0.5442237", "0.54400796", "0.5433074", "0.54316694", "0.542892", "0.5424643", "0.54198974", "0.54182386", "0.5415561", "0.5412904", "0.5412195", "0.54088956", "0.5407554", "0.5404873", "0.54033476", "0.54021555", "0.54021555", "0.53936446", "0.5391897", "0.5388817", "0.53847057", "0.53770334", "0.5373032", "0.5372889", "0.5372175", "0.53659326", "0.5358903" ]
0.7136189
0
Tells if the structure contains collisions
def has_bond_crossing(self):
    return self.count_bond_collisions() > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collision_check(self):\n return True", "def check_collisions(self):", "def validate_collision(self):\n pass", "def _check_for_collision(self, sample):\n collide=False\n for i in range(len(self.obstacles)):\n collide=collide or self.obstacles[i].in_collision(sample)\n return collide", "def collides(self, other):\r\n for block in self.blocks:\r\n for obstacle in other.blocks:\r\n if block.col == obstacle.col and block.row == obstacle.row:\r\n return True\r\n return False", "def check_collision(self):\n self.collided = False\n\n for point in self.collision_points:\n\n try:\n if self.game_map.get_at((\n int(point[0]), int(point[1])\n )) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True", "def is_collision_by_map_data(self):\n raise NotImplementedError", "def check_collision(self, footprint):\n return self.upperleft[0] < footprint.upperleft[0] < footprint.upperright[0] < self.upperright[0] and \\\n self.upperleft[1] < footprint.upperleft[1] < footprint.bottomleft[1] < self.bottomleft[1]", "def is_collision_by_map_obstacle(self):\n for content in self.contents:\n if self.content.y == self.y and self.content.x == self.x:\n return True\n else:\n return False", "def check_collide(self):\n\n\t\tfor pizza in self.overlapping_sprites:\n\t\t\tpizza.handle_collide()", "def collide(obj1, obj2):\n offset_x = obj2.x - obj1.x #The difference between obj1 and obj 2\n offset_y = obj2.y - obj1.y \n return obj1.mask.overlap(obj2.mask, (int(offset_x), int(offset_y))) != None # (x,y)", "def check_collide(self):\n for pizza in self.overlapping_sprites:\n pizza.handle_collide()", "def _has_collision(self, pos:Point) -> bool:\n\t\t# Check if given postion overlaps with any of the blocks\n\t\thit_list = self._collision_list(pos, self.all_blocks)\n\n\t\t# Check if given postion overlaps with any of the players\n\t\t# This makes players non-clipable\n\t\thit_list += self._collision_list(pos, self.players.values())\n\n\t\t# Make bomb non-clipable\n\t\thit_list += self._collision_list(pos, self.bomb_list)\n\n\t\t# Loop through each colliding sprite, remove it, and add to the score.\n\t\treturn len(hit_list) > 0", "def collision(self):\n raise NotImplementedError", "def check_collide(self):\r\n for raindrop in self.overlapping_sprites:\r\n raindrop.handle_collide()", "def check_collide(self):\r\n for paddle in self.overlapping_sprites:\r\n self.score.value +=10", "def check_collision(new_x, new_y):\n query_x = -1024 + (new_x * player.width)\n query_y = -1024 + (new_y * player.height)\n # print('queried %s,%s' % (query_x, query_y))\n zone_query = zone_map[player.current_zone].index.intersect(\n bbox=(query_x, query_y, query_x, query_y)\n )\n for i in zone_query:\n # print('found:%s which is %s' % (i.name, i.collision))\n if i.collision:\n return False\n return True", "def box_collision(self):\n border_box_pos_1 = self.box_1.x + self.box_1.width/2\n border_box_pos_2 = self.box_2.x - self.box_2.width/2\n\n if (border_box_pos_2 - border_box_pos_1) <= 0:\n return True\n else:\n return False", "def can_overlap(self):\n return False", "def has_collide(self, obj):\n rect1 = self.anim.getRect()\n rect2 = obj.anim.getRect()\n \n rect1.move_ip(self.pos)\n rect2.move_ip(obj.pos)\n \n return rect1.colliderect(rect2)", "def accurate_collision(self, other) -> bool:\r\n if self.collide:\r\n if self.bbox_intersect(other):\r\n offset = round(self.x - other.x), \\\r\n round(self.y - other.y)\r\n if self.mask.overlap(other.mask, offset): # Overlap returns None or 1 point\r\n return True\r\n return 
False\r\n else:\r\n return False", "def check_collide(self): #New of Rev2.0\r\n for pizza in self.overlapping_sprites:\r\n self.score.value +=50 #Incr. score by 50\r\n pizza.handle_collide()", "def collision_detect(self):\n\n # Check if the collision was with a map\n # Rect-based collision code\n for map_rect in Map.current_map.collision_rects:\n collision_time, norm_x, norm_y = collision.aabb_swept_collision(self.rect, (self.vx, self.vy), map_rect)\n if collision_time != 1:\n if DEBUG: print(\"[collision]\", collision_time)\n break\n self.x += self.vx * collision_time\n self.y += self.vy * collision_time\n\n remaining_time = 1 - collision_time\n \"\"\"\n if remaining_time > 0:\n self.vx *= remaining_time;\n self.vy *= remaining_time;\n \"\"\"\n if collision_time != 1:\n if abs(norm_x) > .0001:\n self.vx = -self.vx * COLLISION_DAMPING\n if abs(norm_y) > .0001:\n self.vy = -self.vy * COLLISION_DAMPING\n self.collision_counter += 1\n return True\n return False\n\n # Old, mask-based collision code\n \"\"\"\n self.mask = pygame.mask.from_surface(self.image)\n point = pygame.sprite.collide_mask(Map.current_map, self)\n if point:\n if COLLISION_ALGORITHM_EXPERIMENTAL:\n self.vx, self.vy = collision.calculate_reflection_angle(Map.current_map.mask, point, (self.vx, self.vy))\n else: \n self.vx, self.vy = collision.simple_collision(Map.current_map.mask, point, (self.vx, self.vy))\n self.vx, self.vy = self.vx * COLLISION_DAMPING, self.vy * COLLISION_DAMPING\n \n self.collision_counter += 1\n return True\n return False\n \"\"\"", "def collide(b1,b2):\n if mag(b1.pos-b2.pos) < (b1.radius + b2.radius - .05):\n return True", "def check_collide(self):\n for pizza in self.overlapping_sprites:\n pizza.handle_collide()\n pizza.destroy", "def _check_collisions(self):\n if self.bomb_status == False:\n collisions = pygame.sprite.groupcollide(self.bullets, self.stars, True, True)\n else:\n collisions = pygame.sprite.groupcollide(self.bullets, self.stars, False , True)\n if collisions:\n for stars in collisions.values():\n self.stats.score += self.settings.star_points * len(stars)\n self.sb.prep_score()\n self.sb.check_high_score()\n if not self.stars:\n # Destroy existing bullets and create new galaxy.\n self.bullets.empty()\n self._create_galaxy()\n self.settings.increase_speed()\n # Increase level.\n self.stats.level += 1\n self.sb.prep_level()", "def check_overlap(current, hit, overlap = 200):\n for prev in current:\n p_coords = prev[2:4]\n coords = hit[2:4]\n if get_overlap(coords, p_coords) >= overlap:\n return True\n return False", "def is_in_collision(self, molecule):\n if self is molecule: # a molecule can't collide with itself\n return False\n return (self.position - molecule.position).length() < 2 * Molecule.radius", "def check_collisions(self):\n for tail in self.tail:\n if tail.position == self.head.position:\n self.die()\n\n future_pos = Position(self.head_x + self.direction.move_x * Const.SQUARE_SIZE,\n self.head_y + self.direction.move_y * Const.SQUARE_SIZE)\n\n if future_pos.x < 0 or future_pos.x > Const.G_B_W - Const.SQUARE_SIZE or \\\n future_pos.y < 0 or future_pos.y > Const.G_B_H - Const.SQUARE_SIZE:\n self.die()", "def is_collided(self, rect):\n # return self.get_hit_box().colliderect(rect)\n player_hitbox = self.get_hit_box()\n distance = math.sqrt((math.pow(rect[0]-player_hitbox[0],2) + (math.pow(rect[1]-player_hitbox[1],2))))\n # dont collide with objects passed you\n if distance < self.player_image_size[0] and rect[0] >= player_hitbox[0]:\n return True\n else:\n return False", "def 
check_for_collision(self, obj: Union[Pipe, Base], base: Base) -> bool:\n bird_mask = self.get_mask()\n for mask, top in zip(obj.get_mask(), obj.get_tops()):\n if bird_mask.overlap(mask, (round(obj.x) - self.x, top - int(self.y))):\n return True\n\n # Collision with ground\n if self._rotate_image()[1].colliderect(base.rect):\n return True\n return False", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def collide(self):\n dist = distance.cdist(self.object_position, self.object_position, \"euclidean\")\n collision = ((dist - self.object_radius) <= 0) * 1\n np.fill_diagonal(collision, 0)\n collision = np.sum(collision, axis=1)\n print(dist)\n print(collision)\n return collision", "def is_collision(self):\n to_del_tie_fighters = []\n to_del_bullets = []\n for i in range(len(self.tie_fighters)):\n for j in range(len(self.bullets)):\n distance = math.sqrt(math.pow(self.tie_fighters[i].position_x -\n self.bullets[j].position_x, 2) +\n math.pow(self.tie_fighters[i].position_y -\n self.bullets[j].position_y, 2))\n if distance < 25:\n to_del_tie_fighters.append(i)\n to_del_bullets.append(j)\n self.score += 1\n for i in reversed(to_del_tie_fighters):\n del self.tie_fighters[i]\n self.number_of_tie_fighters -= 1\n for j in reversed(to_del_bullets):\n del self.bullets[j]", "def if_overlap(self, x, y) -> bool:\n if self.pos[y][x] != '-':\n print('此坐标已有棋子,请仔细观察棋盘')\n return True\n return False", "def test_collisions(self):\n if self.fortress_exists:\n if self.smallhex.collide(self.ship):\n self.gameevents.add(\"collide\", \"small_hex\", \"ship\")\n else:\n self.smallhex.small_hex_flag = False\n for i, shell in enumerate(self.shell_list):\n if shell.collide(self.ship):\n self.gameevents.add(\"collide\", \"shell\", i)\n #need to treat this carefully - the mine can overlap the fortress, so we don't want to remove the same missile twice\n for i, missile in enumerate(self.missile_list):\n del_missile = False\n if self.fortress_exists and missile.collide(self.fortress):\n self.gameevents.add(\"collide\", \"missile_\" + str(i), \"fortress\")\n del_missile = True\n for j, mine in enumerate(self.mine_list):\n if missile.collide(mine) and not missile.collide(self.fortress):\n self.gameevents.add(\"collide\", \"missile_\" + str(i), \"mine_\" + str(j))\n del_missile = True\n if del_missile:\n del self.missile_list[i]\n for i, mine in enumerate(self.mine_list):\n if mine.collide(self.ship):\n self.gameevents.add(\"collide\", \"mine_\" + str(i), \"ship\")", "def is_overlapping(box1, box2):\n if box1[2] <= box2[0]: # If box1 is to the left of box2\n return False\n elif box1[0] >= box2[2]: # If box1 is to the right of box2\n return False\n elif box1[3] <= box2[1]: # If box1 is below box2\n return False\n elif box1[1] >= box2[3]: # If box1 is above box2\n return False\n else:\n return True", "def _check_for_collisions(self):\n for player in self.players:\n self._check_for_bubble_collision(self.balls, True, player)\n self._check_for_bubble_collision(self.hexagons, False, player)\n self._check_for_bonus_collision(player)", "def _check_collisions(self):\n\t\tif pygame.sprite.spritecollide(\n\t\t\tself.bolan, \n\t\t\tself.obstacles.obstacles,\n\t\t\tFalse, \n\t\t\tpygame.sprite.collide_mask):\n\t\t\t\tself.is_play = False\n\t\t\t\tself.is_gameover = True\n\t\t\t\tself.bolan.image = self.settings.bolan_dead_image", "def hasCollidedWith(self,otherEntity):\n 
distance=math.sqrt((otherEntity.xPos-self.xPos)**2+(otherEntity.yPos-self.yPos)**2)\n return distance < (self.hitboxRadius+otherEntity.hitboxRadius)", "def collide(self, x, y):\n return self._rect.collidepoint(x, y)", "def is_overlapping(self, region):\n if self.x2 < region.x1:\n return False # this box is left the other\n if self.x1 > region.x2:\n return False # this box is right the other\n if self.y2 < region.y1:\n return False # this box is above the other\n if self.y1 > region.y2:\n return False # this box is below the other\n return True", "def check_collisions(primary, secondaries):\n collisions = pygame.sprite.spritecollide(primary, secondaries, False, pygame.sprite.collide_mask)\n return collisions", "def collision(cubes, player):\n if player in cubes:\n return True\n else:\n return False", "def should_grow_on_food_collision(self):\n return True", "def check_collision(self, a, b):\n\n dis_x = abs((a.x+a.r + a.dx)-(b.x+b.r + b.dx))\n dis_y = abs((a.y+a.r + a.dy)-(b.y+b.r + b.dy))\n distance = math.sqrt(dis_x*dis_x + dis_y*dis_y)\n\n if distance <= (b.r + a.r) and (a.colliding == False or b.colliding == False):\n\n return True", "def check_collision():\n global round_over\n\n if player1.alive:\n if pygame.sprite.spritecollide(\n player1,\n obstacles,\n False,\n pygame.sprite.collide_mask):\n collision_text()\n reset_players()\n else:\n if pygame.sprite.spritecollide(\n player2,\n obstacles,\n False,\n pygame.sprite.collide_mask):\n round_over = True\n collision_text()\n reset_players()", "def on_collision(self):", "def wall_collision(self):\n border_box_pos_1 = self.box_1.x - self.box_1.width/2\n\n if (border_box_pos_1) <= 0:\n return True\n else:\n return False", "def check_coll(self, particle):\r\n \r\n r1, r2 = self.radius, particle.radius\r\n x1, x2 = self.position, particle.position\r\n di = x2-x1\r\n norm = np.linalg.norm(di)\r\n if norm-(r1+r2)*1.1 < 0:\r\n return True\r\n else:\r\n return False", "def valid_collision(self, collision_tuple):\n # check if first entity has not been in collision since computation of collision\n if collision_tuple[2][0] == self.collision_count_particles[collision_tuple[1][0]]:\n # if there is a second particle and it has been in another collision -> False\n if len(collision_tuple[2]) == 2 and \\\n collision_tuple[2][1] != self.collision_count_particles[collision_tuple[1][1]]:\n return False\n # Accept if there is only one particle, or the second particle has not been in another collision\n else:\n return True\n else:\n return False", "def check_collisions(self, offset, index, obstacles):\n unaltered = True\n self.rect.move_ip(offset)\n while pygame.sprite.spritecollideany(self, obstacles):\n\n # First of all, check if it is a motile transparent block.\n # if so, do nothin\n col_spr = pygame.sprite.spritecollideany(self, obstacles)\n if hasattr(col_spr, \"inertia\"):\n if col_spr.inertia:\n break\n\n if self.climb:\n\t self.climb_mobility = False\n else:\n self.climb_mobility = True\n\n self.rect[index] += (1 if offset[index] < 0 else -1)\n unaltered = False\n #print(\"DEBUG: PLAYERCOL, {}\".format(index))\n\n # stop walking animation\n if index == 0:\n self.walk = False\n\n\n return unaltered", "def collision(self, block):\n if self.pos_x == block.pos_x and self.pos_y+self.height == block.pos_y:\n self.col_d = True\n if self.pos_x == block.pos_x+block.width and self.pos_y == block.pos_y:\n self.col_l = True\n if self.pos_x == block.pos_x-self.width and self.pos_y == block.pos_y:\n self.col_r = True", "def collide(self, map_object):\n if not 
self.tile.passable:\n return False\n else:\n if len(self.objects) == 0:\n return True\n else:\n for o in self.objects:\n o.collide(map_object)\n if not o.passable:\n return False\n return True", "def is_collision_conf(self, q: np.ndarray) -> bool:\n for obs in self.obstacles:\n if np.fabs(q[2]-obs[0]) <= obs[3] and np.fabs(q[0]) <= obs[1] and np.fabs(q[1]) <= obs[2]:\n return True\n return False", "def _check_collisions(self, link_pose_mat, avoidance_radius):\n for link_pose in link_pose_mat:\n # only use x,y,z from link pose\n x_3x1 = np.array((link_pose[0, 0], link_pose[0, 1], link_pose[0, 2]))\n if self.check_collision(x_3x1, avoidance_radius):\n return True\n return False", "def _collide(self):\n\n collisions = self._get_collisions()\n for collision in collisions:\n self._update_excitation(collision)\n atom1 = self.atoms[collision[0]]\n atom2 = self.atoms[collision[1]]\n\n r = atom1.pos-atom2.pos\n r_mag = np.linalg.norm(r)\n r_hat = r/r_mag\n\n v_1_r = np.dot(atom1.vel, r_hat)\n v_2_r = np.dot(atom2.vel, r_hat)\n\n v_1_r_f = (atom1.mass-atom2.mass)*v_1_r/(atom1.mass + atom2.mass)\\\n + 2*atom2.mass*v_2_r/(atom1.mass + atom2.mass)\n v_2_r_f = (atom2.mass-atom1.mass)*v_2_r/(atom1.mass + atom2.mass)\\\n + 2*atom1.mass*v_1_r/(atom1.mass + atom2.mass)\n\n delta_v_1 = (v_1_r_f - v_1_r)*r_hat\n delta_v_2 = (v_2_r_f - v_2_r)*r_hat\n\n self.atoms[collision[0]].vel += delta_v_1\n self.atoms[collision[1]].vel += delta_v_2", "def check_collisions(self, g):\n self.rects = {}\n for gc in self.sc.game_objects:\n self.load_game_object(gc)\n if g.name in self.backw_rects.keys():\n r = self.backw_rects[g.name]\n return r.collidedictall(self.rects)\n return []", "def player_collision(self, player):\n return True", "def is_collision(dict_a, dict_b):\n\n intersection = set(dict_a.values()) & set(dict_b.values())\n if not intersection:\n # Empty\n return False\n else:\n # Not Empty\n return True", "def check_wall_collision(self):\r\n if self.head.xcor() > 280 or self.head.xcor() < -280 or \\\r\n self.head.ycor() > 280 or self.head.ycor() < -280:\r\n return False\r\n else:\r\n return True", "def __collision_sprite(self) -> bool:\n if pygame.sprite.spritecollide(self.player.sprite,self.pipes,False):\n return True\n else: \n return False", "def setupCollisions(self) :", "def is_overlap(box_1, box_2, iou_th):\n return box_1.iou(box_2) > iou_th", "def check_collision(self, x_3x1, avoidance_radius):\n if len(self.obstacles) > 1:\n for obs_point in self.obstacles[:]:\n dist = np.linalg.norm(obs_point - x_3x1)\n if dist < avoidance_radius:\n print \"dist: \" + str(dist)\n # a collision was found within the avoidance radius\n return True\n return False", "def is_collision_at(self, x, y):\n return self._on_post(x, y)", "def collide(self, pos):\n\t\tpass", "def detect_bolt_collision(self,bolt):\n\n # [x,y]\n top_left_bolt = [bolt.get_bolt_x() - BOLT_WIDTH/2 , bolt.get_bolt_y() + BOLT_HEIGHT/2]\n top_right_bolt = [bolt.get_bolt_x() + BOLT_WIDTH/2 , bolt.get_bolt_y() + BOLT_HEIGHT/2]\n bottom_left_bolt = [bolt.get_bolt_x() - BOLT_WIDTH/2 , bolt.get_bolt_y() - BOLT_HEIGHT/2]\n bottom_right_bolt =[bolt.get_bolt_x() + BOLT_WIDTH/2 , bolt.get_bolt_y() - BOLT_HEIGHT/2]\n\n if self.contains((top_left_bolt[0],top_left_bolt[1])):\n return True\n if self.contains((top_right_bolt[0],top_right_bolt[1])):\n return True\n if self.contains((bottom_left_bolt[0],bottom_left_bolt[1])):\n return True\n if self.contains((bottom_right_bolt[0],bottom_right_bolt[1])):\n return True\n\n return False", "def 
detect_bolt_collision(self,bolt):\n\n # [x,y]\n top_left_bolt = [bolt.get_bolt_x() - BOLT_WIDTH/2 , bolt.get_bolt_y() + BOLT_HEIGHT/2]\n top_right_bolt = [bolt.get_bolt_x() + BOLT_WIDTH/2 , bolt.get_bolt_y() + BOLT_HEIGHT/2]\n bottom_left_bolt = [bolt.get_bolt_x() - BOLT_WIDTH/2 , bolt.get_bolt_y() - BOLT_HEIGHT/2]\n bottom_right_bolt =[bolt.get_bolt_x() + BOLT_WIDTH/2 , bolt.get_bolt_y() - BOLT_HEIGHT/2]\n\n if self.contains((top_left_bolt[0],top_left_bolt[1])):\n return True\n if self.contains((top_right_bolt[0],top_right_bolt[1])):\n return True\n if self.contains((bottom_left_bolt[0],bottom_left_bolt[1])):\n return True\n if self.contains((bottom_right_bolt[0],bottom_right_bolt[1])):\n return True\n\n return False", "def collide(self, p1, p2):\n distance = p1.pos.distance(p2.pos) # distance between to particles\n if distance.length() < (p1.size + p2.size):\n pass", "def detect_collision(self, pos):\n for rect in self.stock:\n if rect.collidepoint((pos)):\n return ['S0', 0]\n for rect in self.waste:\n if rect.collidepoint((pos)):\n return ['W0', 0]\n for i, rect in enumerate(self.foundations):\n if rect.collidepoint((pos)):\n return ['F'+str(i), 0]\n for rect in self.tableaus:\n if rect[0].collidepoint((pos)):\n return rect[1]\n\n return [0,0]", "def _get_collisions(self):\n\n collisions = []\n for i in range(self.n_atoms):\n for j in range(i+1, self.n_atoms):\n if self._overlapping(self.atoms[i], self.atoms[j]):\n if not((i, j) in self.overlap):\n collisions.append((i, j))\n else:\n try:\n self.overlap.remove((i, j))\n except ValueError:\n pass\n\n for i, j in collisions:\n for entry in self.overlap:\n if i in entry or j in entry:\n self.overlap.remove(entry)\n\n self.overlap += collisions\n return collisions", "def collisions(x1,y1,x2,y2):\n collisions = 0\n m = (y2 - y1) / (x2 - x1)\n y = lambda x: m*(x-x1) + y1\n \n under = None\n for obstacle in obstacles:\n rx = obstacle.get_x()\n ry = obstacle.get_y()\n rh = obstacle.get_height()\n rw = obstacle.get_width()\n \n intersects = False \n if y(x) < ry + rh and y(rx) > ry:\n intersects = True\n if y(rx) > ry + rh and y(rx+rw) < ry+rh:\n intersects = True\n if y(rx) < ry + rh and y(rx+rw) > ry+rh:\n intersects = True\n if y(rx) > ry and y(rx+rw) < ry:\n intersects = True\n if y(rx) < ry and y(rx+rw) > ry:\n intersects = True \n \n if intersects:\n collisions += 1\n \n return collisions", "def test_simple_collision(self):\n with PhysicsEngineHarness('tests/simple-collision.json') as physics_engine:\n # In this case, the first entity is standing still and the second\n # on a collision course going left to right. 
The two should bounce.\n # Entity 0 has r=50 and everything else 0.\n # Entity 2 has r=30, x=-500, vx=10, and everything else 0.\n # There's also entity 1, which is far away and shouldn't interact.\n # Let's do some math oh hey, they should collide at t=42.\n approach = physics_engine.get_state(41)\n bounced = physics_engine.get_state(43)\n self.assertTrue(approach[0].x > approach[2].x)\n self.assertTrue(approach[2].vx > 0)\n self.assertTrue(bounced[0].x > bounced[2].x)\n self.assertTrue(bounced[2].vx < 0)\n self.assertEqual(\n round(approach[1].vy),\n round(bounced[1].vy))", "def is_colliding(network, allocations):\n for allocation in allocations:\n if network.overlaps(allocation):\n return True\n return False", "def check_collisions(self):\r\n\r\n # NOTE: This assumes you named your targets list \"targets\"\r\n\r\n for bullet in self.bullets:\r\n for target in self.targets:\r\n\r\n # Make sure they are both alive before checking for a collision\r\n if bullet.alive and target.alive:\r\n too_close = bullet.radius + target.radius\r\n\r\n if (abs(bullet.center.x - target.center.x) < too_close and\r\n abs(bullet.center.y - target.center.y) < too_close):\r\n # its a hit!\r\n # This is bonus target hit\r\n if target.type == \"Bonus\":\r\n bullet.alive = False\r\n self.score += target.hit(self.targets)\r\n # This is other targets hit \r\n else:\r\n bullet.alive = False\r\n self.score += target.hit()\r\n\r\n # We will wait to remove the dead objects until after we\r\n # finish going through the list\r\n\r\n # Now, check for anything that is dead, and remove it\r\n self.cleanup_zombies()", "def check_collisions(self):\n\n # NOTE: This assumes you named your targets list \"targets\"\n\n for bullet in self.bullets:\n for target in self.targets:\n\n # Make sure they are both alive before checking for a collision\n if bullet.alive and target.alive:\n too_close = bullet.radius + target.radius\n\n if (abs(bullet.center.x - target.center.x) < too_close and\n abs(bullet.center.y - target.center.y) < too_close):\n # its a hit!\n bullet.alive = False\n self.score += target.hit()\n\n # We will wait to remove the dead objects until after we\n # finish going through the list\n\n # Now, check for anything that is dead, and remove it\n self.cleanup_zombies()", "def is_overlapping(t):\n memlen, itemsize, ndim, shape, strides, offset = t\n visited = 1 << memlen\n for ind in indices(shape):\n i = memory_index(ind, t)\n bit = 1 << i\n if visited & bit:\n return True\n visited |= bit\n return False", "def is_colliding(self, rect):\n\t\treturn rect.colliderect(self._collision_rect)", "def collision_pipes(self, pipes_list: list):\n result = False\n for pipe in pipes_list:\n if self.x_pos + self.width > pipe.x_pos and self.x_pos < pipe.x_pos + pipe.width:\n if self.y_pos < pipe.y_pos_up + pipe.height: # collide with top\n result = True\n break\n elif self.y_pos + self.height > pipe.y_pos_down: # collide with bottom\n result = True\n break\n return result", "def is_overlapping(t):\n memlen, itemsize, ndim, shape, strides, offset = t\n visited = 1<<memlen\n for ind in indices(shape):\n i = memory_index(ind, t)\n bit = 1<<i\n if visited & bit:\n return True\n visited |= bit\n return False", "def CheckOverlap(self, via):\r\n\r\n for item in self.overlappings:\r\n if type(item) is pcbnew.PAD:\r\n if item.GetBoundingBox().Intersects(via.GetBoundingBox()):\r\n return True\r\n elif type(item) is pcbnew.PCB_VIA:\r\n # Overlapping with vias work best if checking is performed by intersection\r\n if 
item.GetBoundingBox().Intersects(via.GetBoundingBox()):\r\n return True\r\n elif type(item) in [pcbnew.ZONE, pcbnew.FP_ZONE]:\r\n if item.GetBoundingBox().Intersects(via.GetBoundingBox()):\r\n return True\r\n elif type(item) is pcbnew.PCB_TRACK:\r\n if item.GetBoundingBox().Intersects(via.GetBoundingBox()):\r\n width = item.GetWidth()\r\n dist, _ = pnt2line(via.GetPosition(), item.GetStart(), item.GetEnd())\r\n if dist <= self.clearance + width // 2 + via.GetWidth() / 2:\r\n return True\r\n return False", "def collides_with(self, bird):\n return pygame.sprite.collide_mask(self, bird)", "def checkCollision(self):\r\n self.actionColID, self.colliderID, res = self.receiver.getCollision()\r\n\r\n return res", "def collision(self):\n # Check collision with walls\n (x_coord, y_coord) = (self.x_coord[0], self.y_coord[0])\n if x_coord <= EDGE or x_coord >= SCREEN_X - self.size - EDGE or \\\n y_coord <= EDGE or y_coord >= SCREEN_Y - self.size - EDGE:\n return True\n # Check collision with self\n corners = self.get_corners()\n if self.heading == \"right\":\n (frontleft_x, frontleft_y) = (corners[1][0], corners[1][1])\n (frontright_x, frontright_y) = (corners[2][0], corners[2][1])\n elif self.heading == \"left\":\n (frontleft_x, frontleft_y) = (corners[3][0], corners[3][1])\n (frontright_x, frontright_y) = (corners[0][0], corners[0][1])\n elif self.heading == \"up\":\n (frontleft_x, frontleft_y) = (corners[0][0], corners[0][1])\n (frontright_x, frontright_y) = (corners[1][0], corners[1][1])\n elif self.heading == \"down\":\n (frontleft_x, frontleft_y) = (corners[2][0], corners[2][1])\n (frontright_x, frontright_y) = (corners[3][0], corners[3][1])\n for i in range(len(self.x_coord)):\n if self.x_coord[i] < frontleft_x < self.x_coord[i] + self.size and \\\n self.y_coord[i] < frontleft_y < self.y_coord[i] + self.size:\n return True\n if self.x_coord[i] < frontright_x < self.x_coord[i] + self.size and \\\n self.y_coord[i] < frontright_y < self.y_coord[i] + self.size:\n return True\n return False", "def __hit_bricks(self, g_object):\n return type(g_object) == GRect and g_object != self.__paddle", "def check_collision(self, p1xy, p2xy):\n p1rc = xy2rc(p1xy)\n p2rc = xy2rc(p2xy)\n rr, cc = line(int(p1rc[0]), int(p1rc[1]), int(p2rc[0]), int(p2rc[1]))\n line_coords_rc = np.vstack([rr, cc]).T\n for line_coord_rc in line_coords_rc:\n if array_in_list(line_coord_rc, list(self.obstacles)):\n return True\n return False", "def _resolve_ball_collisions(self) -> bool:\n\n bln_naughty = True\n lng_naughty_loop_count = 0\n lng_naughty_loop_limit = 10\n while bln_naughty:\n lng_naughty_loop_count += 1\n if lng_naughty_loop_count > lng_naughty_loop_limit:\n return False\n bln_naughty = False\n\n \"\"\" Ball vs Ball \"\"\"\n for sprBall1, sprBall2 in TrashyPhysics.collision_pairs_self(\n self.grpBalls, fncCollided=TrashyPhysics.balls_collided):\n bln_naughty = True\n TrashyPhysics.bounce_balls(sprBall1, sprBall2)\n\n \"\"\" Ball vs Bot \"\"\"\n for sprBall, sprRobot in TrashyPhysics.collision_pairs(\n self.grpBalls, self.grpRobots,\n fncCollided=TrashyPhysics.ball_robot_collided):\n bln_naughty = True\n TrashyPhysics.bounce_ball_off_bot(sprRobot, sprBall)\n\n \"\"\" Ball vs Wall \"\"\"\n for ball in filter(lambda x: TrashyPhysics.collided_wall(x), self.lstBalls):\n bln_naughty = True\n TrashyPhysics.bounce_ball_off_wall(ball)\n\n \"\"\" Ball vs Bumper \"\"\"\n # todo\n\n return True", "def collision_info(self, point):\n pass", "def check_intersection(obj1, obj2):\n (x1, y1, w1, h1) = obj1.get_box()\n (x2, y2, w2, h2) = 
obj2.get_box()\n if x2 + w2 - 1 < x1 or x2 >= x1 + w1:\n return False\n if y2 + h2 - 1 < y1 or y2 >= y1 + h1:\n return False\n \n return True", "def collisions(grid_map, rise, run):\n period = len(grid_map[0])\n return sum(row[i * run % period] == '#' for i, row in enumerate(grid_map[::rise]))", "def check_collisions(self) -> None:\n # Check collisions of bullets\n pygame.sprite.groupcollide(self.player1_bullet, self.player2_bullet, True, True)\n pygame.sprite.groupcollide(self.player1_bullet, self.mob_bullet, True, True)\n pygame.sprite.groupcollide(self.player2_bullet, self.mob_bullet, True, True)\n\n # Check player vs player collisions\n self.check_players_collision()\n\n # Check collision of mobs with player 1 bullet\n self.p1_score += self.check_player_mob_collision(self.player1_bullet)\n\n # Check collision of mobs with player 2 bullet\n self.p2_score += self.check_player_mob_collision(self.player2_bullet)", "def _in_huc(shply, huc_shply):\n if huc_shply.contains(shply):\n return 2\n elif huc_shply.intersects(shply):\n return 1\n else:\n return 0", "def _find_intersection(self):\n count = 0\n for each_list in self.lab.look():\n if each_list[1] == 'wall':\n count += 1\n if count < 2:\n return True\n else:\n return False", "def collided_with(self, entity):\n drawables = self.get_drawables()\n rectangles = []\n for d in drawables:\n rectangles.append(d.get_rect())\n return entity.get_rect().collidelist(rectangles) != -1", "def collided_with(self, entity):\n drawables = self.get_drawables()\n rectangles = []\n for d in drawables:\n rectangles.append(d.get_rect())\n return entity.get_rect().collidelist(rectangles) != -1", "def ifCollide( ball1, ball2 ):\n\t\n\tb1_x, b1_y = ball1.position.xy\n\tb2_x, b2_y = ball2.position.xy\n\t\n\t#vector connect center of particles\n\tdistant = Vector.from_points((b2_x, b2_y), (b1_x, b1_y))\n\t\n\t#if lenght of vector above is less( equal ) than sum of radius ( they overlapping )\n\tif ( ball1.r + ball2.r ) ** 2 >= distant.norm():\n\t\treturn True\n\telse:\n\t\treturn False", "def handle_collisions(self):\n\n def change_velocities(p1, p2):\n \"\"\"\n persons p1 and p2 have collided elastically: update their\n velocities.\n\n \"\"\"\n\n m1, m2 = p1.radius**2, p2.radius**2\n M = m1 + m2\n r1, r2 = p1.r, p2.r\n d = np.linalg.norm(r1 - r2)**2\n v1, v2 = p1.v, p2.v\n u1 = v1 - 2*m2 / M * np.dot(v1-v2, r1-r2) / d * (r1 - r2)\n u2 = v2 - 2*m1 / M * np.dot(v2-v1, r2-r1) / d * (r2 - r1)\n if p1.health == -100 or p1.social_dist == 1:\n p2.v *= -1\n elif p2.health == -100 or p2.social_dist == 1:\n p1.v *= -1\n else:\n p1.v = u1\n p2.v = u2\n\n\n def update_health(p1, p2):\n '''\n If collision between two persons, change their health status depending on health of both\n the persons that collided\n '''\n if p1.health == -1 and p2.health == 1:\n p2.health = -1\n elif p2.health == -1 and p1.health == 1:\n p1.health = -1\n\n # We're going to need a sequence of all of the pairs of persons when\n # we are detecting collisions. 
combinations generates pairs of indexes\n # into the self.persons list of persons on the fly.\n pairs = combinations(range(self.n), 2)\n\n for i,j in pairs:\n if self.persons[i].overlaps(self.persons[j]):\n change_velocities(self.persons[i], self.persons[j])\n update_health(self.persons[i], self.persons[j])\n self.collisions += 1", "def detect_collision():\n # with the top and bottom of screen\n if ball.ycor() > GAME_Y_BARRIER or ball.ycor() < -GAME_Y_BARRIER:\n ball.bounce_y()\n # with the paddles\n if ball.distance(paddle_right) < 50 and ball.xcor() > GAME_X_BARRIER \\\n or ball.distance(paddle_left) < 50 and ball.xcor() < -GAME_X_BARRIER:\n ball.bounce_x()", "def victory_checker() -> bool:\r\n conflict_check()\r\n for x in range(shape):\r\n for y in range(shape):\r\n if conflict_space[x, y] != 0:\r\n return False\r\n if separation_crawler(False):\r\n return False\r\n return True" ]
[ "0.80110776", "0.7117187", "0.69468826", "0.69385827", "0.69306934", "0.6928933", "0.6904321", "0.6890036", "0.6859777", "0.681465", "0.6788974", "0.6769276", "0.67647177", "0.6714456", "0.6694013", "0.66029567", "0.6584946", "0.6561978", "0.65351593", "0.6516069", "0.65115017", "0.64763385", "0.64739233", "0.646845", "0.64676607", "0.64428395", "0.64215994", "0.6407357", "0.6402175", "0.6386923", "0.63721126", "0.63580227", "0.6350106", "0.63458216", "0.6336036", "0.63270867", "0.63109946", "0.63065344", "0.62857676", "0.62598026", "0.62562394", "0.62546575", "0.62406296", "0.62341803", "0.62160647", "0.6207407", "0.6195274", "0.61790824", "0.61755025", "0.61747116", "0.61685413", "0.6166464", "0.614383", "0.6136455", "0.6127348", "0.6097352", "0.6095383", "0.6091142", "0.6088879", "0.60771513", "0.60758704", "0.60684645", "0.6067559", "0.6067325", "0.6059592", "0.6046176", "0.6029416", "0.60292375", "0.60292375", "0.6023698", "0.6015381", "0.6014106", "0.601131", "0.60075444", "0.6003766", "0.59998894", "0.5992454", "0.5977935", "0.5969317", "0.5968726", "0.5948991", "0.59484166", "0.5939782", "0.5936983", "0.59361285", "0.5931134", "0.59260166", "0.59182346", "0.58985984", "0.5894631", "0.5889212", "0.5887552", "0.58865786", "0.5876104", "0.58666", "0.58666", "0.58613443", "0.58595395", "0.58482355", "0.58439845" ]
0.590947
88
Calculate the quality of the ligand depiction. The higher the score, the worse the depiction; ideally it should be 0.
def depiction_score(self):
    collision_penalty = 1
    degenerated_penalty = 0.4

    bond_collisions = self.count_bond_collisions()
    degenerated_atoms = self.count_suboptimal_atom_positions(0.0, 0.5)

    score = (
        collision_penalty * bond_collisions
        + degenerated_penalty * degenerated_atoms
    )

    return round(score, 1)
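A hedged, standalone sketch of the same weighted-penalty idea follows; the function name and the example counts are invented for illustration, while the weights (1 per bond collision, 0.4 per degenerated atom position) mirror the method above.

def depiction_quality(bond_collisions, degenerated_atoms):
    # Weighted sum of depiction problems: 0 is a clean drawing,
    # larger values mean a worse one.
    collision_penalty = 1
    degenerated_penalty = 0.4
    return round(collision_penalty * bond_collisions
                 + degenerated_penalty * degenerated_atoms, 1)

# Hypothetical counts: a clean depiction scores 0.0, a messy one scores higher.
print(depiction_quality(0, 0))  # 0.0
print(depiction_quality(2, 3))  # 3.2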
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quality(self) -> float:\n if self.get_cover_size() == 0:\n return 0\n else:\n if self.baseline == Baseline.COMPLEMENT:\n return self.__complement_quality()\n else:\n return self.__population_quality()", "def quality(self) -> int:\n return self._quality", "def get_multiplier(quality):\n\n if quality == \"low\":\n return 5\n elif quality == \"medium\":\n return 6\n elif quality == \"good\":\n return 7\n elif quality == \"high\":\n return 8\n return 6", "def quality(self):\n return self.plays * self.number", "def qualityToProbability(qual, isSolexa=True):\n\n if isSolexa:\n return 1 / (1 + 10 ** (qual/10.0))\n else:\n return 10 ** (-qual / 10.0)", "def quality(X, boost=2):\n X = np.abs(np.sort(X, axis=-1).astype(float))\n Q = 1 - np.log(2 + X[..., -2]) / np.log(2 + X[..., -1])\n Q = (Q * 2).clip(0, 1)\n return Q", "def getSignalQualityInDBM(self):\n return (float(self.wlanSignalQuality) / 2.0) - 100.0", "def set_quality(self):\n p = self.suitability + 1.15 * self.fono\n self.quality = np.exp(p) / (1 + np.exp(p))", "def minimum_featured_quality(self):\n value = self.setting(self.MINIMUM_FEATURED_QUALITY).float_value\n if value is None:\n value = 0.65\n return value", "def perfectrefl(wavelength):\n return 1.0", "def microphone_sensitivity(transferfactor: float) -> float:\n return amp2db(transferfactor/1000.)", "def lowComplexityFraction(self):\n length = len(self)\n if length:\n lowerCount = len(list(filter(str.islower, self.sequence)))\n return float(lowerCount) / length\n else:\n return 0.0", "def Illumina_Sanger(qual):\n QualityScoreOut = ''\n for quality in qual:\n newQual = chr((ord(quality) - 64) + 33)\n QualityScoreOut += newQual\n return QualityScoreOut", "def quality(value: str) -> str:\n if \"HDTV\" in value:\n return \"HDTV\"\n else:\n return \"SD\"", "def quality_rating(PR):\n \n if PR <= 85:\n quality = \"poor\"\n elif PR < 90:\n quality = \"mediocre\"\n elif PR < 95:\n quality = \"good\"\n else:\n quality = \"great\"\n\n return quality", "def fkg(word, sent, syll):\n flesch_kincaid_grade = (0.39* (word / sent)) + (11.8 * (syll / word)) - 15.59\n return flesch_kincaid_grade", "def sharpness_penalty(self):\n # This polynomial function gives the gain for peaking filter which achieves 18 dB / octave max derivative\n # The polynomial estimate is accurate in the vicinity of 18 dB / octave\n gain_limit = -0.09503189270199464 + 20.575128011847003 * (1 / self.q)\n # Scaled sigmoid function as penalty coefficient\n x = self.gain / gain_limit - 1\n sharpness_penalty_coefficient = 1 / (1 + np.e ** (-x * 100))\n return np.mean(np.square(self.fr * sharpness_penalty_coefficient))", "def FindScale(self):\n\n ## 6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")", "def image_quality(img):\n # convert bgr image to gray -> float32\n score = 0.0\n if img is None:\n return score\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n x = gray.astype(np.float32)\n h, w = x.shape[0], x.shape[1]\n\n # horizontal\n d_h = x[:,1:] - x[:,:-1]\n w_bound = int(8*(np.floor(w/8.0)-1)) + 1\n B_h = np.mean(np.abs(d_h[:,7:w_bound:8]))\n A_h = (8.0 * np.mean(np.abs(d_h)) - B_h) / 7.0\n sig_h = np.sign(d_h)\n left_sig, right_sig = sig_h[:,:-2], sig_h[:,1:-1]\n Z_h = np.mean((left_sig * right_sig)<0)\n\n # vertical\n d_v = x[1:, :] - x[:-1, :]\n h_bound = int(8*(np.floor(h/8.0)-1)) + 1\n B_v = np.mean(np.abs(d_v[7:h_bound:8, :]))\n A_v = (8.0 * np.mean(np.abs(d_v)) - B_v) / 7.0\n sig_v = np.sign(d_v)\n up_sig, down_sig = sig_v[:-2, :], sig_v[1:-1, :]\n Z_v = np.mean((up_sig * 
down_sig)<0)\n\n # combine the weights\n B = (B_h + B_v)/2.0\n A = (A_h + A_v)/2.0\n Z = (Z_h + Z_v)/2.0\n\n # quality prediction\n alpha = -245.8909\n beta = 261.9373\n gamma1 = -239.8886 / 10000.0 \n gamma2 = 160.1664 / 10000.0 \n gamma3 = 64.2859 / 10000.0 \n\n # corner case of a black / white frame\n if np.abs(A) < 1e-3 or np.abs(B) < 1e-3 or np.abs(Z) < 1e-3:\n score = 0.0\n else:\n score = alpha + beta*(B**gamma1)*(A**gamma2)*(Z**gamma3)\n\n return score", "def search_quality(self,strz):\n\t\tfor q in l_quality: #l_quality = list of allow quality words\n\t\t\tif q in strz:\n\t\t\t\tself.quality=q.replace(\".\",\"\")\n\t\t\t\treturn strz.replace(q,\"\")\n\t\treturn strz", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))", "def info_gain(left, right, current_uncertainty):\n p = float(len(left)) / (len(left) + len(right))\n return current_uncertainty - p * gini(left) - (1 - p) * gini(right)", "def _compute_noise_level(self, data):\n noise = max(data)\n noise_min = 2600\n noise_max = 4095\n ratio = (noise - noise_min)/(noise_max - noise_min)\n return int(ratio*100)", "def determine_quality(self, function):\n if self.ground_truth_annotation_select.value is None:\n return None\n if self.segmentation_result_select.value is None:\n return None\n if self.segmentation_result_select.value is self.ground_truth_annotation_select.value:\n return None\n\n if self.ground_truth_annotation_select.value.data.max() == 0:\n return\n if self.segmentation_result_select.value.data.max() == 0:\n return\n\n quality = function(self.ground_truth_annotation_select.value.data, self.segmentation_result_select.value.data)\n\n return quality", "def __complement_quality(self) -> float:\n group = np.zeros(shape=self.Dataset.size)\n np.put(group, self.get_cover(), 1)\n\n time = self.Dataset.survival\n status = self.Dataset.status\n\n _, pvalue = sm.duration.survdiff(time, status, group)\n return 1 - pvalue", "def digital_gain():\n def r(x):\n return x/512.\n\n def w(x):\n return int(x*512)\n return r, w", "def microphone_transferfactor(sensitivity: float) -> float:\n a = db2amp(sensitivity)\n return a * 1000 # convert it to mV", "def get_printer_quality(self):\n return self.parent.printer.get_quality()", "def get_strength(self):\n return 10 - self.get_agility()", "def info_gain(self, left, right, current_uncertainty):\n p = float(len(left)) / (len(left) + len(right))\n return current_uncertainty - p * self.gini(left) - (1 - p) * self.gini(right)", "def understandability(self):\n # self._understandability = - 0.33 * self.ANA + 0.33 * self.DAM - 0.33 * self.DCC + 0.34 * self.CAMC \\\n # - 0.33 * self.NOP - 0.33 * self.NOM - 0.33 * self.DSC\n self._understandability = - 0.33 * self.ANA + 0.66 * self.DAM - 0.33 * self.DCC + 0.66 * self.CAMC \\\n - 0.33 * self.NOP - 0.33 * self.NOM\n return round(self._understandability, 5)", "def strength(self) -> float:\n ...", "def genQuality(self):\n return np.clip(np.random.normal(self.qavgs, self.qstdevs), 0, 40)", "def updateLoudness(self, A):\n\t\tnA = A * self.alpha\n\t\treturn nA if nA > 1e-13 else self.A", "def rmdspe(self) -> float:\n return float(np.sqrt(np.median(np.square(self._percentage_error()))) * 100.0)", "def resolution(self):\n\t\tif 
self.name.endswith( '_LOW' ):\n\t\t\treturn 'LOW'\n\t\telif self.name.endswith( '_MID' ):\n\t\t\treturn 'MID'\n\t\telse:\n\t\t\treturn 'HIGH'", "def enforce_quality_limits(self):\n if self.orig_quality <= 50:\n if self.quality >= 50:\n self.quality = 50", "def parse_quality_for_video (self, video):\n quality = '720'\n if video['videoQuality']['hasHD']:\n quality = '1080'\n if video['videoQuality']['hasUltraHD']:\n quality = '4000'\n return quality", "def reusability(self):\n self._reusability = -0.25 * self.DCC + 0.25 * self.CAMC + 0.5 * self.CIS + 0.5 * self.DSC\n return round(self._reusability, 5)", "def len23(self) -> float:\n ...", "def getQuality(self, obereZeile, untereZeile):\n qualitaetsZeile = \"\"\n if self.aligntIsDna:\n _exakter_match_list = self.DNA_EXAKTER_MATCH\n _guter_match_list = self.DNA_GUTER_MATCH\n _kein_guter_match_list = self.DNA_KEIN_GUTER_MATCH\n _qualitaetsListe = self.qualitaetsListe\n else:\n _exakter_match_list = self.AA_EXAKTER_MATCH\n _guter_match_list = self.AA_GUTER_MATCH\n _kein_guter_match_list = self.AA_KEIN_GUTER_MATCH\n _qualitaetsListe = self.qualitaetsListeProteins\n \n for i in range(len(obereZeile)):\n if (\n obereZeile[i] == self.INPUT_GAP_ZEICHEN or\n untereZeile[i] == self.INPUT_GAP_ZEICHEN \n ):\n qualitaetsZeile += self.QUAL_GAP_ZEICHEN\n else:\n currentResiduePair = str.upper(obereZeile[i] + untereZeile[i])\n # print(currentResiduePair)\n indexOfPair = _qualitaetsListe.index(currentResiduePair)\n if indexOfPair in _exakter_match_list:\n qualitaetsZeile += self.EXAKTER_MATCH_ZEICHEN\n if indexOfPair in _guter_match_list:\n qualitaetsZeile += self.GUTER_MATCH_ZEICHEN\n if indexOfPair in _kein_guter_match_list:\n qualitaetsZeile += self.KEIN_GUTER_MATCH_ZEICHEN\n return(qualitaetsZeile)", "def get_expected_compression_ratio_pct(self) -> int:\n return 100", "def quality_data(self, s):\n known_symbols = np.mod(range(176),48)>=32\n print('quality_data',np.sum(np.real(s[known_symbols])<0))\n success = np.sum(np.real(s[known_symbols])<0) < 20\n return success,0 ## no doppler estimate for data frames", "def rsrq_quality_rating(value, unit):\n\n if unit != \"dB\":\n raise ValueError(\"Unsupported unit '{:}'\".format(unit))\n\n rating = 0\n if value > -6:\n rating = 4\n elif -6 >= value > -9:\n rating = 3\n elif -9 >= value > -16:\n rating = 2\n elif value <= -16:\n rating = 1\n\n return rating", "def efficiency(self):\n if self.byte_total == 0:\n return 1\n return self.entropy() / 8", "def get_information_gain(self, word, documents):\n gain = self.get_entropy(documents)\n with_word, without_word = self.get_split_data(word, documents)\n gain -= self.get_entropy(with_word) * len(with_word) / len(documents)\n gain -= self.get_entropy(without_word) * len(without_word) / len(documents)\n return gain", "def test_saturation_mixing_ratio():\n p = 999. * units.mbar\n t = 288. 
* units.kelvin\n assert_almost_equal(saturation_mixing_ratio(p, t), .01068, 3)", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))", "def type(self):\n return self.INEQUALITY", "def type(self):\n return self.INEQUALITY", "def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]:\n freq_increment = np.mean(np.diff(fit_data.x_data))\n\n fit_a = fit_data.ufloat_params[\"a\"]\n fit_b = fit_data.ufloat_params[\"b\"]\n fit_freq = fit_data.ufloat_params[\"freq\"]\n fit_kappa = fit_data.ufloat_params[\"kappa\"]\n\n snr = abs(fit_a.n) / np.sqrt(abs(np.median(fit_data.y_data) - fit_b.n))\n fit_width_ratio = fit_kappa.n / np.ptp(fit_data.x_data)\n\n criteria = [\n fit_data.x_range[0] <= fit_freq.n <= fit_data.x_range[1],\n 1.5 * freq_increment < fit_kappa.n,\n fit_width_ratio < 0.25,\n fit_data.reduced_chisq < 3,\n curve.utils.is_error_not_significant(fit_kappa),\n snr > 2,\n ]\n\n if all(criteria):\n return \"good\"\n\n return \"bad\"", "def degradation_due_to_upscaling(coding_res, display_res):\n scale_factor = display_res / coding_res\n scale_factor = max(scale_factor, 1)\n u1 = 72.61\n u2 = 0.32\n deg_scal_v = u1 * np.log10(u2 * (scale_factor - 1.0) + 1.0)\n deg_scal_v = utils.constrain(deg_scal_v, 0.0, 100.0)\n return deg_scal_v", "def _evaluate_quality(self, fit_data: FitData) -> Union[str, None]:\n return None", "def calculate_measureland_qualifier_flag_overall(row):\n\n mqf_tuple = (row['measureland_qualifier_flag_speed'],\n row['measureland_qualifier_flag_distance'],\n row['measureland_qualifier_flag_acceleration'],\n row['measureland_qualifier_flag_visual'])\n\n if mqf_tuple.count(3) >= 1:\n return 3 # probably bad value\n elif mqf_tuple.count(1) == len(mqf_tuple):\n return 1 # good value\n elif (mqf_tuple.count(9) >= 1) and (mqf_tuple.count(1) == (len(mqf_tuple) - mqf_tuple.count(9))):\n return 2 # probably good value\n elif (mqf_tuple.count(2) >= 1) and (mqf_tuple.count(1) == (len(mqf_tuple) - mqf_tuple.count(2))):\n return 2 # probably good value\n else:\n return 2 # values that have passed the quality check are likely to be of good quality according to the criteria used, so assign as probably good value", "def test_quality_filter_illumina_qual(self):\r\n # header with no qual data passes\r\n header = \"990:2:4:11271:5323/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=0.75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # header with no qual data passes\r\n header = \"990:2:4:11271:5323/0\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = 
quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # header with no qual data passes (old barcode in header format)\r\n header = \"HWI-6X_9267:1:1:4:1699#ACCACCC/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # bad qual fails filter\r\n header = \"@HWI-ST753_50:6:1101:1138:1965#0/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (3,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # bad qual passes filter if filter turned off\r\n header = \"@HWI-ST753_50:6:1101:1138:1965#0/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=False)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # good qual passes filter\r\n header = \"@HWI-ST753_50:6:1101:1138:1965#1/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbbbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))", "def _get_lip_best(self) -> float:\n pass", "def test_density_to_air_quality():\n assert density_to_air_quality(0) == 1\n assert density_to_air_quality(35) == 1\n assert density_to_air_quality(35.1) == 2\n assert 
density_to_air_quality(75) == 2\n assert density_to_air_quality(115) == 3\n assert density_to_air_quality(150) == 4\n assert density_to_air_quality(300) == 5", "def fk_grade(self, doc):\n num_sentences = _get_num_sentences(doc)\n num_words = _get_num_words(doc)\n num_syllables = _get_num_syllables(doc)\n if num_sentences == 0 or num_words == 0 or num_syllables == 0:\n return 0\n return (\n (11.8 * num_syllables / num_words)\n + (0.39 * num_words / num_sentences)\n - 15.59\n )", "def ultratio(chrlength, uniqueratio, chrtotalreads, frcount):\n ultratio = chrlength * uniqueratio / (chrtotalreads - frcount)\n\n return ultratio", "def quality_fis(self,fis):\n correct = 0\n count = 0\n for cl_state in self.classes:\n r,c = cl_state.quality_fis(fis)\n print \"For\",cl_state.name,r,\"/\",c\n correct += r\n count += c\n return (correct,count)", "def FindResistance(FirstBand,SecondBand,ThirdBand,FourthBand=\"blank\"):\n\tFirstBand = str(FirstandSecondBandValues[FirstBand])\n\tSecondBand = str(FirstandSecondBandValues[SecondBand])\n\t#Multiplying first two digits with Multiplier(Third Band) convert to string so it can be concatenated with the other strings \n\treturn(str(int(FirstBand+SecondBand)*MultiplierValues[ThirdBand])+ \" ohms \" + PrecisionValues[FourthBand])", "def ratio(n1,n2, explain=0, optimize=False):\n weight_normal_form = 5.0 #distance between soundexes of normal form\n weight_normal_form_soundex = 8.0 #average distance between soundexes of normal form\n weight_geslachtsnaam1 = 10.0 #distance between soundexes of geslachtsnamen\n weight_geslachtsnaam2 = 10.0 #distance between geslachtsnaam\n weight_initials = 2 #distance between initials\n\n nf1 = n1.guess_normal_form()\n nf2 = n2.guess_normal_form()\n\n if not nf1 or not nf2:\n return 0.0\n elif nf1 == nf2:\n return 1.0\n ratio_normal_form = Similarity.average_distance(split(nf1), split(nf2))\n \n #create a simkplified soundex set for this name\n #remove stopwords\n# nf1 = remove_stopwords( nf1)\n# nf2 = remove_stopwords( nf2)\n \n se1 = n1.get_normal_form_soundex()\n se2 = n2.get_normal_form_soundex()\n ratio_normal_form_soundex = Similarity.average_distance( se1, se2)\n \n #gelachtsnaam wordt op twee manieren met elkaar vergeleken\n g1 = n1.geslachtsnaam() #or n1.get_volledige_naam()\n g2 = n2.geslachtsnaam() #or n2.get_volledige_naam()\n g1 = to_ascii(g1)\n g2 = to_ascii(g2)\n if not optimize:\n #de soundexes van de achternaam worden meegewoen\n #g1_soundex = n1.soundex_nl(g1, group=2, length=-1)\n g1_soundex = n1.geslachtsnaam_soundex()\n #g2_soundex = n2.soundex_nl(g2, group=2, length=-1)\n g2_soundex = n2.geslachtsnaam_soundex()\n ratio_geslachtsnaam1 = Similarity.average_distance(g1_soundex, g2_soundex)\n else:\n ratio_geslachtsnaam1 = 1 \n weight_geslachtsnaam1 = 0\n \n #n de afstand van de woorden in de achtenraam zelf\n ratio_geslachtsnaam2 = Similarity.average_distance(\n re.split('[ \\.\\,\\-]', g1.lower()),\n re.split('[ \\.\\,\\-]', g2.lower()),\n levenshtein_ratio)\n n1_initials = n1.initials()\n n1_initials_lower = n1_initials.lower()\n n2_initials = n2.initials()\n n2_initials_lower = n2_initials.lower()\n n1_contains_initials = n1.contains_initials()\n n2_contains_initials = n2.contains_initials()\n #count initials only if we have more than one\n #(or perhaps make this: if we know the first name)\n if len(n1_initials) == 1 or len(n2_initials) == 1:\n #initials count much less if there is only one\n weight_initials = weight_initials_if_one_name_consists_of_one_word_only\n# ratio_initials = .5\n ratio_initials = 
levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n elif n1_contains_initials or n2_contains_initials:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n weight_initials = weight_initials_if_one_name_is_in_initials\n elif len(n1_initials) > 1 and len(n2_initials) > 1:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n else:\n ratio_initials = 0.7\n \n if n1_contains_initials or n2_contains_initials:\n weight_normal_form = weight_normal_form_if_one_name_is_in_initials \n weight_normal_form_soundex = weight_normal_form_soundex_if_one_name_is_in_initials\n\n counter = (ratio_normal_form * weight_normal_form +\n ratio_normal_form_soundex * weight_normal_form_soundex +\n ratio_geslachtsnaam1 * weight_geslachtsnaam1 +\n ratio_geslachtsnaam2 * weight_geslachtsnaam2 +\n ratio_initials * weight_initials)\n numerator = (weight_normal_form + weight_normal_form_soundex +\n weight_initials + weight_geslachtsnaam1 + weight_geslachtsnaam2)\n if numerator == 0:\n return 0.0\n final_ratio = counter/numerator\n\n if explain:\n s = '-' * 100 + '\\n'\n s += 'Naam1: %s [%s] [%s] %s\\n' % (n1, n1_initials, n1.guess_normal_form(), se1)\n s += 'Naam2: %s [%s] [%s] %s\\n' % (n2, n2_initials, n2.guess_normal_form(), se2)\n s += 'Similarity ratio: %s\\n' % final_ratio\n s += '--- REASONS' + '-' * 30 + '\\n'\n format_s = '%-30s | %-10s | %-10s | %-10s | %-10s | %s-10s\\n'\n s += format_s % ('\\t property', ' ratio', ' weight','relative_weight', ' r*w', 'r * relative_w')\n s += '\\t' + '-' * 100 + '\\n'\n format_s = '\\t%-30s | %-10f | %-10f | %-10f | %-10f | %-10f\\n'\n s += format_s % (' normal_form', ratio_normal_form, weight_normal_form,weight_normal_form/counter, ratio_normal_form * weight_normal_form, ratio_normal_form * weight_normal_form/counter)\n s += format_s % ('soundex van normal_form', ratio_normal_form_soundex, weight_normal_form_soundex,weight_normal_form_soundex/counter, ratio_normal_form_soundex* weight_normal_form_soundex, ratio_normal_form_soundex * weight_normal_form_soundex/counter)\n s += format_s % ('soundex van geslachtsnaam1', ratio_geslachtsnaam1, weight_geslachtsnaam1,weight_geslachtsnaam1/counter, ratio_geslachtsnaam1 * weight_geslachtsnaam1, ratio_geslachtsnaam1 * weight_geslachtsnaam1/counter)\n s += format_s % ('geslachtsnaam', ratio_geslachtsnaam2, weight_geslachtsnaam2,weight_geslachtsnaam2/counter, ratio_geslachtsnaam2 *weight_geslachtsnaam2 , ratio_geslachtsnaam2 * weight_geslachtsnaam2/counter)\n s += format_s % ('initials', ratio_initials, weight_initials, weight_initials/counter, ratio_initials *weight_initials, ratio_initials * weight_initials/counter)\n s += '\\tTOTAL (numerator) | %s (counter = %s)\\n' % (counter, numerator)\n \n return s\n return final_ratio", "def calc_gain(s, i):\n return math.sqrt((i + s) / (6 * s))", "def freq_optimization(self):\n index = identify_scale(self.vz, True)\n # In case the patient is limping\n if index > 35:\n index = index / 2\n print(f\"Scale used is {index}\")", "def anisotropy_solution(r):\n return r**2", "def make_light_prob(distance):\n if distance <= 1250 / 9:\n return 1\n return .99 * make_light_prob(distance - 250 / 9)", "def __population_quality(self) -> float:\n population_identifier = np.zeros(shape=self.Dataset.size)\n subgroup_identifier = np.ones(shape=len(self.get_cover()))\n group = np.concatenate((population_identifier,\n subgroup_identifier))\n\n subgroup_times = self.Dataset.survival[self.get_cover()]\n subgroup_status = self.Dataset.status[self.get_cover()]\n\n time 
= np.concatenate((self.Dataset.survival, subgroup_times))\n status = np.concatenate((self.Dataset.status, subgroup_status))\n\n _, pvalue = sm.duration.survdiff(time, status, group)\n return 1 - pvalue", "def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities", "def calc_slow(self, demands: Demand, routing: Routing) -> float:\n edge_capacities = [e[2]['weight'] for e in\n sorted(self.graph.edges(data=True))]\n link_utilisation = self.calc_overall_link_utilisation(demands, routing)\n # Because utilisation compared to link width is what we care about here\n ratio_capacities = np.divide(link_utilisation, edge_capacities)\n\n return np.max(ratio_capacities)", "def effectiveness(self):\n self._effectiveness = 0.20 * self.ANA + 0.20 * self.DAM + 0.20 * self.MOA + 0.20 * self.MFA + 0.20 * self.NOP\n return round(self._effectiveness, 5)", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def _update_quality(self, quality:float):\n if quality is not None:\n self.quality_text.setText(str(quality)[0:5])\n if quality > 0.9:\n self.quality_text.setStyleSheet(\"QLabel { color : green }\")\n elif quality > 0.5:\n self.quality_text.setStyleSheet(\"QLabel { color : yellow }\")\n else:\n self.quality_text.setStyleSheet(\"QLabel { color : red }\")\n else:\n self.quality_text.setText(\"\")", "def audio_quality_key(option):\n return (\n AUDIO_RATING_DICT[option.media_type.audio_format],\n option.media_type.audio_bitrate\n )", "def flexibility(self):\n self._flexibility = 0.25 * self.DAM - 0.25 * self.DCC + 0.5 * self.MOA + 0.5 * self.NOP\n return round(self._flexibility, 5)", "def FWHM(self):\n # The width of the Lorentz profile\n fl = 2.0 * self[\"al\"]\n # Width of the Gaussian [2.35 = 2*sigma*sqrt(2*ln(2))]\n fd = 2.35482 * self['ad']\n return 0.5346 * fl + numpy.sqrt(0.2166 * (fl**2.) + fd**2.)", "def golden_ratio():\n return 1.61803398875", "def overall_sensitivity(self):\n if self.mod1:\n s = torch.max(torch.max(self.weight, -1)[0], -1)[0].item()\n else:\n s = torch.max(torch.sqrt(torch.sum(self.weight * self.weight, -1)))[0].item()\n s *= np.sqrt(2. 
/ np.e)\n return s", "def get_scale():\r\n\r\n \r\n return 0.5", "def quality(self):\n try:\n qid = int((self.tool_metadata or {}).get(\"quality\", 0))\n except:\n qid = 0\n\n # We might be able to get the quality strings from the item's tags\n internal_name, name = \"normal\", \"Normal\"\n if self.tags:\n tags = {x.get('category'): x for x in self.tags}\n if 'Quality' in tags:\n internal_name, name = tags['Quality'].get('internal_name'), tags['Quality'].get('name')\n\n return qid, internal_name, name", "def ecio_quality_rating(value, unit):\n\n if unit != \"dBm\":\n raise ValueError(\"Unsupported unit '{:}'\".format(unit))\n\n rating = 0\n if value > -2:\n rating = 4\n elif -2 >= value > -5:\n rating = 3\n elif -5 >= value > -10:\n rating = 2\n elif value <= -10:\n rating = 1\n\n return rating", "def increment_quality(self, increment_unit):\n if self.quality > self.min_quality and self.quality < self.max_quality:\n self.quality = self.quality + increment_unit\n return self.quality", "def ility(self) -> str:\n return self.system_quality_attribute()", "def g_score(self):\n _, _, I_CK = self._entropies()\n return 2.0 * I_CK", "def clarity_conversion(string):\n if (string == 'I1'):\n return 0.125\n if (string == 'SI2'):\n return 0.25\n if (string == 'SI1'):\n return 0.375\n if (string == 'VS2'):\n return 0.5\n if (string == 'VS1'):\n return 0.625\n if (string == 'VVS2'):\n return 0.75\n if (string == 'VVS1'):\n return 0.875\n if (string == 'IF'):\n return 1", "def luminosity_distance(self, z):\n return self.proper_distance(z) * (1 + z)", "def estimate_phones(x):\n if x['mean_luminosity_km2'] > 5:\n return 10\n elif x['mean_luminosity_km2'] > 1:\n return 5\n else:\n return 1", "def resolution(self, level):\n return 2 ** (level - 1)", "def specificity(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result += value / focal.cardinal\n return round(result, 6)", "def end_rate(lablled,str):\n#\tlabelled = RawClaim.objects.exclude(correcttrim=\"\")\n\tbad = [l for l in labelled if str in l.correcttrim and normal(l.correcttrim)]\n\tgood = [l for l in labelled if correct_ends_with(l.sentence,l.correcttrim,str)]\n\treturn (float(len(good))/(len(good)+len(bad)),bad,good)", "def extendability(self):\n self._extendability = 0.50 * self.ANA - 0.50 * self.DCC + 0.50 * self.MFA + 0.50 * self.NOP\n return round(self._extendability, 5)", "def test_coded_freq(self):\n self.assertAlmostEqual(self.g.coded_freq(), 10 / 18)", "def eval_fis(self,fis):\n #res = 0.0\n #for cl_state in self.classes:\n # res += cl_state.eval_fis(fis)\n #print \"=>\",res\n #return 1.0/res\n try:\n correct,count = self.quality_fis(fis)\n except Exception as err:\n print err\n correct = 0\n return correct", "def gain(self):\n return self[1]", "def test_saturation_mixing_ratio_dimensions():\n p = 998. 
* units.mbar\n temp = 20 * units.celsius\n assert str(saturation_mixing_ratio(p, temp).units) == 'dimensionless'", "def fk_ease(self, doc):\n num_sentences = _get_num_sentences(doc)\n num_words = _get_num_words(doc)\n num_syllables = _get_num_syllables(doc)\n if num_sentences == 0 or num_words == 0 or num_syllables == 0:\n return 0\n words_per_sent = num_words / num_sentences\n syllables_per_word = num_syllables / num_words\n return 206.835 - (1.015 * words_per_sent) - (84.6 * syllables_per_word)", "def sRGBContrastRatio(color1,color2):\n l1=srgbLuminance(color1)\n l2=srgbLuminance(color2)\n return (max(l1,l2)+0.05)/(min(l1,l2)+0.05)", "def information(self, fdist):\n freq = fdist.get(self.string)\n if not freq:\n freq = 0\n return 1 - (log(freq + 1) / log(fdist.N() + 1))", "def lms_gain(self):\n return self._lms_gain", "def lorentzian(self, params):\n height, width, c_freq = params\n return height / (1.0+ (4.0 / width**2)*(self.freqs - c_freq)**2)", "def likelihood(seq):\n global qmap\n if qmap is None:\n qmap = {'!': 1.0, '\"': 0.7943282347242815, '#': 0.6309573444801932, '$': 0.5011872336272722, '%': 0.3981071705534972, '&': 0.31622776601683794, \"'\": 0.251188643150958, '(': 0.19952623149688797, ')': 0.15848931924611134, '*': 0.12589254117941673, '+': 0.1, ',': 0.07943282347242814, '-': 0.06309573444801933, '.': 0.05011872336272722, '/': 0.039810717055349734, '0': 0.03162277660168379, '1': 0.025118864315095794, '2': 0.0199526231496888, '3': 0.015848931924611134, '4': 0.012589254117941675, '5': 0.01, '6': 0.007943282347242814, '7': 0.00630957344480193, '8': 0.005011872336272725, '9': 0.003981071705534973, ':': 0.0031622776601683794, ';': 0.0025118864315095794, '<': 0.001995262314968879, '=': 0.001584893192461114, '>': 0.0012589254117941675, '?': 0.001, '@': 0.0007943282347242813, 'A': 0.000630957344480193, 'B': 0.0005011872336272725, 'C': 0.00039810717055349735, 'D': 0.00031622776601683794, 'E': 0.00025118864315095795, 'F': 0.00019952623149688788, 'G': 0.00015848931924611142, 'H': 0.00012589254117941674, 'I': 0.0001, 'J': 7.943282347242822e-05, 'K': 6.309573444801929e-05, 'L': 5.011872336272725e-05, 'M': 3.9810717055349695e-05, 'N': 3.1622776601683795e-05, 'O': 2.5118864315095822e-05, 'P': 1.9952623149688786e-05, 'Q': 1.584893192461114e-05, 'R': 1.2589254117941661e-05, 'S': 1e-05, 'T': 7.943282347242822e-06, 'U': 6.30957344480193e-06, 'V': 5.011872336272725e-06, 'W': 3.981071705534969e-06, 'X': 3.162277660168379e-06, 'Y': 2.5118864315095823e-06, 'Z': 1.9952623149688787e-06, '[': 1.584893192461114e-06, '\\\\': 1.2589254117941661e-06, ']': 1e-06, '^': 7.943282347242822e-07, '_': 6.30957344480193e-07, '`': 5.011872336272725e-07, 'a': 3.981071705534969e-07, 'b': 3.162277660168379e-07, 'c': 2.5118864315095823e-07, 'd': 1.9952623149688787e-07, 'e': 1.584893192461114e-07, 'f': 1.2589254117941662e-07, 'g': 1e-07, 'h': 7.943282347242822e-08, 'i': 6.30957344480193e-08, 'j': 5.011872336272725e-08, 'k': 3.981071705534969e-08, 'l': 3.162277660168379e-08, 'm': 2.511886431509582e-08, 'n': 1.9952623149688786e-08, 'o': 1.5848931924611143e-08, 'p': 1.2589254117941661e-08, 'q': 1e-08, 'r': 7.943282347242822e-09, 's': 6.309573444801943e-09, 't': 5.011872336272715e-09, 'u': 3.981071705534969e-09, 'v': 3.1622776601683795e-09, 'w': 2.511886431509582e-09, 'x': 1.9952623149688828e-09, 'y': 1.584893192461111e-09, 'z': 1.2589254117941663e-09, '{': 1e-09, '|': 7.943282347242822e-10, '}': 6.309573444801942e-10, '~': 5.011872336272714e-10, '\\x7f': 3.9810717055349694e-10, '\\x80': 3.1622776601683795e-10, '\\x81': 
2.511886431509582e-10, '\\x82': 1.9952623149688828e-10, '\\x83': 1.584893192461111e-10, '\\x84': 1.2589254117941662e-10, '\\x85': 1e-10}\n return [qmap[i] for i in seq]", "def quality(self, value: int):\n # TODO - Ensure that this is valid\n self._quality = value" ]
[ "0.66823983", "0.6546814", "0.65287745", "0.6406672", "0.6386629", "0.6259903", "0.6180249", "0.6049594", "0.6018899", "0.5956084", "0.58994746", "0.5833759", "0.57996166", "0.5788256", "0.57742614", "0.5768054", "0.57475317", "0.5742199", "0.5725896", "0.57138395", "0.5672698", "0.5661801", "0.56566405", "0.5644218", "0.563214", "0.56309956", "0.56110656", "0.56089026", "0.55952805", "0.55943227", "0.55822664", "0.55634904", "0.5559385", "0.55505514", "0.55311537", "0.551982", "0.5485562", "0.5480136", "0.54564196", "0.5454975", "0.5452466", "0.5445441", "0.5443351", "0.54299414", "0.54237336", "0.5414939", "0.54147094", "0.54030275", "0.5397155", "0.5397155", "0.5396214", "0.53961307", "0.5394897", "0.53850967", "0.53563553", "0.5349088", "0.53412396", "0.5339614", "0.5338015", "0.53243965", "0.53230447", "0.53086895", "0.53067815", "0.5306659", "0.530598", "0.5301419", "0.5292318", "0.52874416", "0.52850443", "0.5270374", "0.52675223", "0.52526116", "0.52470493", "0.5242106", "0.5236581", "0.52333295", "0.52310187", "0.5230339", "0.5227409", "0.5224077", "0.5220362", "0.5215766", "0.5212527", "0.5192161", "0.519016", "0.5179341", "0.5168026", "0.5162102", "0.51560473", "0.51533717", "0.51530874", "0.51468724", "0.5140744", "0.5137607", "0.51320875", "0.5130335", "0.5129936", "0.5127549", "0.5125316", "0.5124233", "0.5123922" ]
0.0
-1
Get batch generator.

`batch_generator` must define a `shape` property that returns the shape of generated sequences as a (n_samples, n_features) tuple.

`batch_generator` must define a method called `get_steps_per_epoch` with the signature `def get_steps_per_epoch(self, protocol, subset)` that returns the number of batches to generate before ending an epoch.

`batch_generator` may optionally define a method called `callbacks` with the signature `def callbacks(self, extract_embedding=None)` that is expected to return a list of Keras callbacks that will be added to the list of callbacks during training. This might come in handy in case the `batch_generator` depends on the internal state of the model currently being trained.
def get_generator(self, file_generator, batch_size=None, **kwargs):
    raise NotImplementedError('')
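Since `get_generator` is left abstract here, the following is a minimal sketch of what a conforming `batch_generator` could look like, assuming a toy random data source. The class name, the batch contents, and the fixed step count are assumptions made for illustration; only the `shape` property, the `get_steps_per_epoch(self, protocol, subset)` signature, and the optional `callbacks(self, extract_embedding=None)` method follow the contract described above.

import numpy as np

class RandomBatchGenerator:
    # Toy generator yielding batches of (n_samples, n_features) sequences.

    def __init__(self, n_samples=100, n_features=20, batch_size=32):
        self.n_samples = n_samples
        self.n_features = n_features
        self.batch_size = batch_size

    @property
    def shape(self):
        # Shape of generated sequences as a (n_samples, n_features) tuple.
        return (self.n_samples, self.n_features)

    def get_steps_per_epoch(self, protocol, subset):
        # Number of batches to generate before ending an epoch; a real
        # implementation would derive this from the protocol and subset.
        return 10

    def callbacks(self, extract_embedding=None):
        # Optional extra Keras callbacks (e.g. ones that need access to the
        # model currently being trained). None are needed for this toy case.
        return []

    def __iter__(self):
        # Endless stream of (inputs, labels) batches.
        while True:
            X = np.random.randn(self.batch_size, self.n_samples, self.n_features)
            y = np.random.randint(0, 2, size=self.batch_size)
            yield X, y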
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_generator(self, generator: \"DataGenerator\", nb_epochs: int = 20, **kwargs) -> None:\n raise NotImplementedError", "def gen_batch(self):\n batch_size = self.batch_size\n shuffle = self.shuffle\n data = np.array(self.sentences)\n\n data_size = len(data)\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n while True:\n # shuffle the data at starting of each epoch\n shuffled_data = data\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n \n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield self._format_samples(shuffled_data[start_index:end_index], self.max_length)\n\n if self.mode in ['train', \"pred\"]:\n break", "def get_generators(patch_size, batch_size, preprocess_func, output_reshape_func, num_validation, train_processes,\n train_cache, train_data_dir='data/train/'):\n\n dirs = util.get_data_list(train_data_dir)\n labels = util.parse_labels_months()\n train_paths, validation_paths = util.train_validation_split(dirs, labels)\n # generate train batch loader\n train_data_loader = CTBatchLoader(train_paths, batch_size, patch_size, num_threads_in_multithreaded=1,\n preprocess_func=preprocess_func)\n\n train_transforms = get_train_transform(patch_size)\n train_data_generator = MultiThreadedAugmenter(train_data_loader, train_transforms, num_processes=train_processes,\n num_cached_per_queue=train_cache, seeds=None, pin_memory=False)\n\n # wrapper to be compatible with keras\n train_generator_keras = KerasGenerator(train_data_generator, output_reshapefunc=output_reshape_func)\n\n # generate validation batch loader\n valid_data_loader = CTBatchLoader(validation_paths, num_validation, patch_size,\n num_threads_in_multithreaded=1, preprocess_func=preprocess_func)\n valid_transforms = get_valid_transform(patch_size)\n valid_data_generator = MultiThreadedAugmenter(valid_data_loader, valid_transforms, num_processes=1,\n num_cached_per_queue=1, seeds=None, pin_memory=False)\n # wrapper to be compatible with keras\n valid_generator_keras = KerasGenerator(valid_data_generator, output_reshape_func, 1)\n\n return train_generator_keras, valid_generator_keras", "def trainGenerator(self,):\n return tf.data.Dataset.from_generator(self.trainData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )", "def setGenerators(self):\n shape = (self.input_shape[0],self.input_shape[1])\n self.trainGen,self.validateGen = getBatchGenerators(self.batch_size,\n self.dataPath,\n shape,\n self.classMap,\n self.regression)", "def get_generator_batch_size(self):\n\n return self.generator_batch_size", "def train_epoch(model,\n\t \ttrain_generator,\n\t \toptimizer,\n\t \tcallback=None):\n model.train()\n for it, (batch_of_x, batch_of_y) in enumerate(train_generator):\n train_on_batch(model, batch_of_x, batch_of_y, optimizer)\n\n if callback is not None:\n callback(model)\n return", "def batch_generator(batch_size, sequence_length,\n x_train_scaled, y_train_scaled, num_x_signals, num_y_signals, num_train):\n # Infinite loop.\n while True:\n # Allocate a new array for the batch of input-signals.\n x_shape = (batch_size, sequence_length, num_x_signals)\n 
x_batch = np.zeros(shape=x_shape, dtype=np.float16)\n\n # Allocate a new array for the batch of output-signals.\n y_shape = (batch_size, sequence_length, num_y_signals)\n y_batch = np.zeros(shape=y_shape, dtype=np.float16)\n\n # Fill the batch with random sequences of data.\n for i in range(batch_size):\n # Get a random start-index.\n # This points somewhere into the training-data.\n idx = np.random.randint(num_train - sequence_length)\n\n # Copy the sequences of data starting at this index.\n x_batch[i] = x_train_scaled[idx:idx + sequence_length]\n y_batch[i] = y_train_scaled[idx:idx + sequence_length]\n yield x_batch, y_batch\n # return x_batch, y_batch", "def batch_generator(batch_size):\n\n # Infinite loop.\n while True:\n # Get a list of random indices for images in the training-set.\n idx = np.random.randint(100,size=batch_size)\n \n # Get the pre-computed transfer-values for those images.\n # These are the outputs of the pre-trained image-model.\n transf_values = np.array([transfer_values[_] for _ in idx])\n\n # For each of the randomly chosen images there are\n # at least 5 captions describing the contents of the image.\n # Select one of those captions at random and get the\n # associated sequence of integer-tokens.\n tokens = [caps_markedwords[_] for _ in idx]\n\n # Count the number of tokens in all these token-sequences.\n num_tokens = [len(t) for t in tokens]\n \n # Max number of tokens.\n max_tokens = np.max(num_tokens)\n # Pad all the other token-sequences with zeros\n # so they all have the same length and can be\n # input to the neural network as a numpy array.\n tokens_padded = pad_sequences(tokens,\n maxlen=max_tokens,\n padding='post',\n truncating='post')\n \n # Further prepare the token-sequences.\n # The decoder-part of the neural network\n # will try to map the token-sequences to\n # themselves shifted one time-step.\n decoder_input_data = tokens_padded[:, 0:-1]\n decoder_output_data = tokens_padded[:, 1:]\n\n # Dict for the input-data. 
Because we have\n # several inputs, we use a named dict to\n # ensure that the data is assigned correctly.\n x_data = \\\n {\n 'decoder_input': decoder_input_data,\n 'transfer_values_input': transf_values\n }\n\n\n # Dict for the output-data.\n y_data = \\\n {\n 'decoder_output': decoder_output_data\n }\n \n yield (x_data, y_data)", "def trainingBatchGenerator(data_folder, image_shape):\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn", "def batch_train_generator(self, X, batch_size, seq_len):\n startidx = np.random.randint(0, len(X) - seq_len, batch_size)\n while True:\n batch_X = np.array([X[start:start + seq_len]\n for start in startidx])\n batch_y = np.array(\n [X[start:start + seq_len + self.config.shift] for start in startidx])\n batch_y = batch_y[:, -1]\n startidx = (startidx + seq_len) % (len(X) - seq_len)\n yield batch_X.reshape(batch_size, seq_len, 1), batch_y.reshape(batch_size, 1)", "def get_data_generator(train_data, validation_data):\n\n def batch_generator(mode=\"train\", batch_size=100):\n assert mode in [\"train\", \"val\"], \"The mode should be in {train, val}.\"\n if mode == \"train\":\n data = train_data.copy()\n elif mode == \"val\":\n data = validation_data.copy()\n\n while True:\n indices = np.random.permutation(np.arange(len(data)))\n data = data[indices]\n\n for i in range(len(data) // batch_size):\n yield data[i * batch_size:(i + 1) * batch_size]\n\n return batch_generator", "def nn_batch_generator(self, x_train):\n # Shuffle the batch\n np.random.seed(self.seed)\n shuffle_index = np.arange(np.shape(x_train)[0])\n np.random.shuffle(shuffle_index)\n x = x_train[shuffle_index, :]\n y = x_train[shuffle_index, :]\n\n # Iterate until making a full epoch\n counter = 0\n while 1:\n index_batch = shuffle_index[\n self.batch_size * counter : self.batch_size * (counter + 1)\n ]\n # Decompress batch\n x_batch = x[index_batch, :]\n y_batch = y[index_batch, :]\n counter += 1\n yield (np.array(x_batch), np.array(y_batch))\n\n # Stopping rule\n if counter >= self.number_of_batches:\n counter = 0", "def generate_validation_batch(self):\n assert self.validation_dataset is not None\n assert self.data_tags is not None\n \n # Sample indices and get data\n index_array = np.random.choice(self.num_validation_samples, self.p.trainer.batch_size)\n return self.get_data_from_indices(self.validation_dataset, index_array)", "def train_step(self, batch, generator):\n ##\n # Split into inputs and outputs\n ##\n\n input_frames = batch[:, :, :, :-3]\n 
gt_output_frames = batch[:, :, :, -3:]\n\n ##\n # Train\n ##\n\n feed_dict = self.build_feed_dict(input_frames, gt_output_frames, generator)\n\n _, global_loss, global_step, summaries = self.sess.run(\n [self.train_op, self.global_loss, self.global_step, self.summaries],\n feed_dict=feed_dict)\n\n ##\n # User output\n ##\n\n if global_step % c.STATS_FREQ == 0:\n print 'DiscriminatorModel: step %d | global loss: %f' % (global_step, global_loss)\n if global_step % c.SUMMARY_FREQ == 0:\n print 'DiscriminatorModel: saved summaries'\n self.summary_writer.add_summary(summaries, global_step)\n\n return global_step", "def train_generator(batch_size):\n\n # reset gradients\n g_solver.zero_grad()\n\n # predict on fake data\n noise = torch.randn(batch_size, Z, 1, 1, device=device)\n prediction = C(G(noise)).view(-1)\n\n # perform back propagation\n # implemenation of loss learned from Pytorch functionality learned from https://wiseodd.github.io/techblog/2017/02/04/wasserstein-gan/\n loss = -torch.mean(prediction)\n loss.backward()\n\n # adjust weights\n g_solver.step()\n\n return loss, prediction.mean().item()", "def predict_batch_generator(self):\n input = np.zeros((self.batch_size, self.max_seq_len,\n self.embedding_size))\n seq_lengths = np.zeros((self.batch_size), dtype=np.intp)\n unique_counts = np.zeros((self.batch_size), dtype=np.intp)\n i = 0\n\n fi = open(self.config.parsed_predict_file)\n sample_gen = self.predict_sample_generator(fi)\n self.load_embedding()\n\n for sequence, seq_length, unique_count in sample_gen:\n seq_lengths[i], unique_counts[i] = seq_length, unique_count\n if seq_lengths[i] > self.max_seq_len:\n seq_lengths[i] = self.max_seq_len\n sequence = sequence[:seq_lengths[i]]\n input[i, 0:seq_lengths[i], :] = self.embedding[sequence, :]\n\n i += 1\n\n if i == self.batch_size:\n yield input, seq_lengths, unique_counts\n input = np.zeros(\n (self.batch_size, self.max_seq_len,\n self.embedding_size)\n )\n i = 0\n\n if i < self.batch_size:\n yield input[:i, :, :], seq_lengths[:i], unique_counts[:i]\n\n fi.close()", "def list_batch_kwargs_generators(self):\n generators = []\n\n if \"batch_kwargs_generators\" in self._datasource_config:\n for key, value in self._datasource_config[\n \"batch_kwargs_generators\"\n ].items():\n generators.append({\"name\": key, \"class_name\": value[\"class_name\"]})\n\n return generators", "def fit(self, training_generator, dimension_train, val_generator, dimension_val):\n self.model.fit_generator(generator=training_generator,\n steps_per_epoch=dimension_train // self.batch_size,\n epochs=self.epochs,\n verbose=1,\n callbacks=self.cb,\n validation_data=val_generator,\n validation_steps=dimension_val//self.batch_size,\n class_weight=None,\n max_queue_size=10,\n workers=multiprocessing.cpu_count(),\n use_multiprocessing=False,\n shuffle=True,\n initial_epoch=0)", "def fit_generator(self, generator, nb_epochs=20, **kwargs):\n from art.data_generators import KerasDataGenerator\n\n # Try to use the generator as a Keras native generator, otherwise use it through the `DataGenerator` interface\n if isinstance(generator, KerasDataGenerator) and not hasattr(self, 'defences'):\n try:\n self._model.fit_generator(generator.generator, epochs=nb_epochs, **kwargs)\n except ValueError:\n logger.info('Unable to use data generator as Keras generator. 
Now treating as framework-independent.')\n super(KerasClassifier, self).fit_generator(generator, nb_epochs=nb_epochs, **kwargs)\n else:\n super(KerasClassifier, self).fit_generator(generator, nb_epochs=nb_epochs, **kwargs)", "def get_batch(self, batch_kwargs, batch_parameters=None) -> None:\n raise NotImplementedError", "def get_batch_kwargs_generator(self, name):\n if name in self._batch_kwargs_generators:\n return self._batch_kwargs_generators[name]\n elif (\n \"batch_kwargs_generators\" in self._datasource_config\n and name in self._datasource_config[\"batch_kwargs_generators\"]\n ):\n generator_config = copy.deepcopy(\n self._datasource_config[\"batch_kwargs_generators\"][name]\n )\n else:\n raise ValueError(\n f\"Unable to load batch kwargs generator {name} -- no configuration found or invalid configuration.\"\n )\n generator = self._build_batch_kwargs_generator(**generator_config)\n self._batch_kwargs_generators[name] = generator\n return generator", "def get_batch_inputs(self, inputs, batch_size=None):\n total_num = inputs.shape[0]\n batch_size = batch_size or self.batch_size\n for i in range(0, total_num, batch_size):\n yield inputs[i:i + batch_size]", "def get_batch(self, data, batch_size=None):\n if batch_size is None:\n # randomly generate a batch for training\n batch_size = self.batch_size\n random_sample = True\n else:\n # convert the whole 'data' into a batch\n # useful in validation or testing\n random_sample = False\n encoder_size, decoder_size = self.encoder_size, self.decoder_size\n # encoder_size = max([len(encoder_input) for encoder_input, _ in data])\n # decoder_size = max([len(decoder_input) for _, decoder_input in data])\n (batch_encoder_inputs, batch_decoder_inputs,\n encoder_sequence_length, decoder_sequence_length) = [], [], [], []\n\n for sample_id in xrange(batch_size):\n if random_sample:\n encoder_input, decoder_input = random.choice(data)\n else:\n encoder_input, decoder_input = data[sample_id]\n encoder_sequence_length.append(len(encoder_input))\n # add 1 for _Go\n decoder_sequence_length.append(len(decoder_input) + 1)\n\n # Encoder inputs are padded.\n encoder_pad = ([data_utils.PAD_ID] *\n (encoder_size - len(encoder_input)))\n batch_encoder_inputs.append(encoder_input + encoder_pad)\n\n # Decoder inputs get an extra \"GO\" symbol, and are padded then.\n decoder_pad_size = decoder_size - len(decoder_input) - 1\n batch_decoder_inputs.append([data_utils.GO_ID] + decoder_input +\n [data_utils.PAD_ID] * decoder_pad_size)\n\n # Here the assumption is that data_utils._PAD = 0\n batch_targets = np.zeros([batch_size, decoder_size], dtype=np.int32)\n batch_weights = np.zeros([batch_size, decoder_size], dtype=np.float32)\n for length_idx in xrange(decoder_size):\n # Create target_weights to be 0 for targets that are padding.\n for batch_idx in xrange(batch_size):\n # We set weight to 0 if the corresponding target is a\n # PAD symbol.\n # The corresponding target is decoder_input shifted by\n # 1 forward.\n if length_idx < decoder_size - 1:\n batch_targets[batch_idx][length_idx] = \\\n batch_decoder_inputs[batch_idx][length_idx + 1]\n if (length_idx < decoder_size - 1 and\n batch_targets[batch_idx, length_idx] != data_utils.PAD_ID):\n batch_weights[batch_idx][length_idx] = 1.0\n return (batch_encoder_inputs, batch_decoder_inputs,\n batch_targets, batch_weights,\n encoder_sequence_length, decoder_sequence_length)", "def validateGenerator(self,):\n return tf.data.Dataset.from_generator(self.validateData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n 
output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )", "def predict_generator(self, generator, dimension_generator):\n return self.model.predict_generator(generator, steps=dimension_generator//self.batch_size, max_queue_size=10, workers=1,\n use_multiprocessing=True, verbose=0)", "def get_batch(self, batch_size, preprocessing_fn=None, mode='training', drop_long_sequences=False):\n data_file_patterns = self.dataset.get_data_filepatterns(mode=mode)\n dataset_r = self.decoder.examples_reader([data_file_patterns], bool(mode == 'training'),\n self.capacity)\n batching_scheme = self._batching_scheme(\n batch_size=batch_size,\n max_length=self.max_length,\n min_length_bucket=self.min_bucket_length,\n length_bucket_step=self.length_bucket_step,\n drop_long_sequences=drop_long_sequences,\n shard_multiplier=self.shard_multiplier,\n length_multiplier=self.length_multiplier)\n\n with tf.name_scope(\"input_pipeline\"):\n if preprocessing_fn is not None:\n dataset_r = dataset_r.map(\n lambda ex: preprocessing_fn(ex, mode), num_threads=self.num_threads)\n dataset_r = dataset_r.filter(\n lambda ex: self._example_too_big(ex, batching_scheme[\"max_length\"]))\n\n dataset_r = self.bucket_by_sequence_length(\n dataset_r, self._example_length, batching_scheme[\"boundaries\"],\n batching_scheme[\"batch_sizes\"], batching_scheme[\"window_size\"])\n # We reshuffle the batches to prevent many long-sequence batches at once.\n if batching_scheme[\"shuffle_queue_size\"] is not None:\n dataset_r = dataset_r.shuffle(batching_scheme[\"shuffle_queue_size\"])\n batched_examples = dataset_r.make_one_shot_iterator().get_next()\n return batched_examples", "def get_test_generator(patch_size, batch_size, preprocess_func, output_reshape_func, test_data_dir='data/test/'):\n\n test_paths = util.get_data_list(test_data_dir)\n\n # generate train batch loader\n test_data_loader = CTBatchLoader(test_paths, batch_size, patch_size, num_threads_in_multithreaded=1,\n preprocess_func=preprocess_func, infinite=False)\n\n # wrapper to be compatible with keras\n return KerasGenerator(test_data_loader, output_reshapefunc=output_reshape_func,\n n=int(len(test_data_loader.indices) / batch_size))", "def create_batch(client, generator: DataGenerator):\n try:\n event_data_batch = client.create_batch()\n for device in generator.devices:\n # event_data_batch.add(EventData(gen.generate_payload(device)))\n event_data_batch.add(EventData(generator.generate_payload(device)))\n return event_data_batch\n except Exception as e:\n print(str(e))", "def generator (self) -> tf.keras.Sequential:\n return self._generator", "def create_generator_params(batch_size: int, samples_per_epoch: int, n_samples: int,\n epochs: int) -> Tuple[int]:\n steps_per_epoch = samples_per_epoch // batch_size\n n_epochs = numpy.ceil(epochs * n_samples / samples_per_epoch)\n return steps_per_epoch, n_epochs", "def iter_batches(self,\n data_generator: Optional[\n Iterable[ArrayTupleOrList]] = None,\n limit: Optional[int] = None,\n count: Optional[int] = None,\n ) -> BatchGenerator:\n # check the context\n if not self._stage.is_active:\n raise RuntimeError('The loop context must be entered before '\n 'calling `iter_batches()`.')\n if self._stage.batch.is_active:\n raise RuntimeError('`iter_batches()` cannot be called when a '\n 'batch is currently running.')\n\n # 
check the arguments\n if count is not None and limit is not None:\n raise ValueError('`count` and `limit` cannot be both specified.')\n\n # we do not allow infinite loop\n if data_generator is None and count is None and limit is None and \\\n self._stage.batch.total is None:\n raise ValueError(\n 'Any one of `data_generator`, `limit` or `count` is required '\n 'to be specified when `max_batch` is not configured for '\n 'the loop.')\n\n return self._iter_batches(\n data_generator=data_generator,\n limit=limit,\n count=count,\n )", "def build_train_generator(X: numpy.array, y: numpy.array,\n batch_size: int = 500) -> Iterable[Tuple[numpy.array]]:\n assert X.shape[0] == y.shape[0], \"Number of samples mismatch in X and y.\"\n\n def xy_generator():\n while True:\n n_batches = X.shape[0] // batch_size\n if n_batches * batch_size < X.shape[0]:\n n_batches += 1 # to yield last samples\n for i in range(n_batches):\n start = i * batch_size\n end = min((i + 1) * batch_size, X.shape[0])\n yield X[start:end], y[start:end]\n return xy_generator()", "def evaluate_generator(self, generator,\n steps=None,\n callbacks=None,\n max_queue_size=10,\n workers=0,\n use_multiprocessing=False,\n verbose=0):\n \n for model in self.parallel_train_models.values():\n model._make_test_function()\n model.reset_metrics()\n \n steps_done = 0\n outs_per_batch = []\n batch_sizes = []\n# self.tb_data = []\n use_sequence_api = is_sequence(generator)\n if not use_sequence_api and use_multiprocessing and workers > 1:\n warnings.warn(\n UserWarning('Using a generator with `use_multiprocessing=True`'\n ' and multiple workers may duplicate your data.'\n ' Please consider using the `keras.utils.Sequence'\n ' class.'))\n if steps is None:\n if use_sequence_api:\n steps = len(generator)\n else:\n raise ValueError('`steps=None` is only valid for a generator'\n ' based on the `keras.utils.Sequence` class.'\n ' Please specify `steps` or use the'\n ' `keras.utils.Sequence` class.')\n enqueuer = None\n\n # Check if callbacks have not been already configured\n if not isinstance(callbacks, cbks.CallbackList):\n callbacks = cbks.CallbackList(callbacks)\n callback_model = model._get_callback_model()\n callbacks.set_model(callback_model)\n callback_metrics = list(model.metrics_names)\n callback_params = {\n 'steps': steps,\n 'verbose': verbose,\n 'metrics': callback_metrics,\n }\n callbacks.set_params(callback_params)\n\n callbacks.model.stop_training = False\n callbacks._call_begin_hook('test')\n\n try:\n if workers > 0:\n if use_sequence_api:\n enqueuer = OrderedEnqueuer(\n generator,\n use_multiprocessing=use_multiprocessing)\n else:\n enqueuer = GeneratorEnqueuer(\n generator,\n use_multiprocessing=use_multiprocessing)\n enqueuer.start(workers=workers, max_queue_size=max_queue_size)\n output_generator = enqueuer.get()\n else:\n if use_sequence_api:\n output_generator = iter_sequence_infinite(generator)\n else:\n output_generator = generator\n\n if verbose == 1:\n progbar = Progbar(target=steps)\n\n while steps_done < steps:\n generator_output = next(output_generator)\n if not hasattr(generator_output, '__len__'):\n raise ValueError('Output of generator should be a tuple '\n '(x, y, sample_weight) '\n 'or (x, y). Found: ' +\n str(generator_output))\n if len(generator_output) == 2:\n x, y = generator_output\n sample_weight = None\n elif len(generator_output) == 3:\n x, y, sample_weight = generator_output\n else:\n raise ValueError('Output of generator should be a tuple '\n '(x, y, sample_weight) '\n 'or (x, y). 
Found: ' +\n str(generator_output))\n if x is None or len(x) == 0:\n # Handle data tensors support when no input given\n # step-size = 1 for data tensors\n batch_size = 1\n elif isinstance(x, list):\n batch_size = x[0].shape[0]\n elif isinstance(x, dict):\n batch_size = list(x.values())[0].shape[0]\n else:\n batch_size = x.shape[0]\n if batch_size == 0:\n raise ValueError('Received an empty batch. '\n 'Batches should contain '\n 'at least one item.')\n\n batch_logs = {'batch': steps_done, 'size': batch_size}\n callbacks._call_batch_hook('test', 'begin', steps_done, batch_logs)\n outs = self.test_on_batch(x, y,\n sample_weight=sample_weight,\n reset_metrics=False)\n outs = to_list(outs)\n outs_per_batch.append(outs)\n \n for l, o in zip(model.metrics_names, outs):\n batch_logs[l] = o\n callbacks._call_batch_hook('test', 'end', steps_done, batch_logs)\n\n steps_done += 1\n batch_sizes.append(batch_size)\n\n if verbose == 1:\n progbar.update(steps_done)\n \n# self.tb_data.append([x, y])\n callbacks._call_end_hook('test')\n finally:\n if enqueuer is not None:\n enqueuer.stop()\n\n averages = [float(outs_per_batch[-1][0])] # index 0 = 'loss'\n for i in range(1, len(outs)):\n averages.append(np.float64(outs_per_batch[-1][i]))\n return unpack_singleton(averages)", "def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None, batch_size=1):\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n\n # Keras requires a generator to run indefinately.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n image, gt_class_ids = load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation)\n\n # Init batch arrays\n if b == 0:\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n\n # Add to batch\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, gt_class_ids] = 1\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_gt_class_ids]\n outputs = []\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n raise", "def train_batch_generator(self):\n seq_lengths = np.zeros((self.batch_size), dtype=np.intp)\n unique_count = np.zeros((self.batch_size), dtype=np.intp)\n fis = (self.config.train_dir + \"pos.txt\",\n self.config.train_dir + \"neg.txt\")\n fi_pos, fi_neg = map(open, fis)\n sample_gen_pos, sample_gen_neg = map(\n lambda fi: self.train_sample_generator(fi),\n (fi_pos, fi_neg)\n )\n self.load_embedding()\n\n while True:\n input = np.zeros((self.batch_size, self.max_seq_len,\n self.embedding_size))\n labels = np.random.choice([0, 1], self.batch_size,\n p=self.config.class_probs)\n for i in range(self.batch_size):\n if labels[i] == 1:\n sequence, seq_lengths[i], unique_count[i] = next(sample_gen_pos)\n else:\n sequence, seq_lengths[i], unique_count[i] = next(sample_gen_neg)\n\n if seq_lengths[i] > self.max_seq_len:\n seq_lengths[i] = self.max_seq_len\n sequence = 
sequence[:seq_lengths[i]]\n input[i, 0:seq_lengths[i], :] = self.embedding[sequence, :]\n yield input, seq_lengths, unique_count, labels\n\n map(lambda fi: fi.close(), (fi_pos, fi_neg))", "def get_batch_gen(self, config):\n\n ################\n # Def generators\n ################\n\n def random_balanced_gen():\n print('trying to generate batch series with ', self.num_train, 'shapes')\n\n # Initiate concatenation lists\n tp_list = [] # points\n tev_list = [] # eigen vectors\n tevt_list = [] # transposed eigen vectors\n tv_list = [] # eigen values\n tevf_list = [] # full eigen vectors for ground truth maps\n ti_list = [] # cloud indices\n\n batch_n = 0\n i_batch = 0\n\n gen_indices = np.random.permutation(int(self.num_train)) # initiate indices for the generator\n # if we had to test on this dataset we would need to introduce a test/val case with non-shuffled indices\n # print(gen_indices.shape, config.batch_num)\n # if config.split == 'test':\n # print('test setting here not fully supported')\n # n_shapes = self.num_test # has to be defined\n # gen_indices = []\n # for i in range(n_shapes - 1):\n # for j in range(i + 1, n_shapes):\n # gen_indices += [i, j] # put all the pairs in order\n # gen_indices = np.array(gen_indices)\n\n\n # Generator loop\n for p_i in gen_indices:\n\n # Get points and other input data\n new_points = self.input_points[p_i]\n new_evecs = self.input_evecs[p_i][:, :self.neig]\n new_evecs_trans = self.input_evecs_trans[p_i][:self.neig, :]\n new_evals = self.input_evals[p_i][:self.neig]\n\n new_evecs_full = self.input_evecs_full[p_i][:, :self.neig]\n\n n = new_points.shape[0]\n\n if i_batch == config.batch_num:\n\n yield (np.concatenate(tp_list, axis=0),\n np.concatenate(tev_list, axis=0),\n np.concatenate(tevt_list, axis=1),\n np.concatenate(tv_list, axis=1),\n np.concatenate(tevf_list, axis=0),\n np.array(ti_list, dtype=np.int32),\n np.array([tp.shape[0] for tp in tp_list]))\n\n tp_list = []\n tev_list = []\n tevt_list = []\n tv_list = []\n tevf_list = []\n ti_list = []\n\n batch_n = 0\n i_batch = 0\n\n # Add data to current batch\n tp_list += [new_points]\n tev_list += [new_evecs]\n tevt_list += [new_evecs_trans]\n tv_list += [new_evals]\n tevf_list += [new_evecs_full]\n ti_list += [p_i]\n\n # Update batch size\n batch_n += n\n i_batch += 1\n\n # yield the rest if necessary (it will not be a full batch and could lead to mistakes because of\n # shape matching needing pairs !!!!)\n yield (np.concatenate(tp_list, axis=0),\n np.concatenate(tev_list, axis=0),\n np.concatenate(tevt_list, axis=1),\n np.concatenate(tv_list, axis=1),\n np.concatenate(tevf_list, axis=0),\n np.array(ti_list, dtype=np.int32),\n np.array([tp.shape[0] for tp in tp_list]))\n\n ##################\n # Return generator\n ##################\n\n # Generator types and shapes\n gen_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32, tf.int32)\n gen_shapes = ([None, 3], [None, self.neig],\n [self.neig, None], [self.neig, None], [None, self.neig], [None], [None])\n\n return random_balanced_gen, gen_types, gen_shapes", "def fit_generator(self, generator,\n steps_per_epoch=None,\n epochs=1,\n verbose=1,\n validation_data=None,\n validation_steps=None,\n validation_freq=1,\n class_weight=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n shuffle=True,\n initial_epoch=0,\n warm_start=False,\n warm_start_model=None,\n save_frequency=None):\n \n val_workers = 0\n val_multiprocessing = False\n \n if warm_start:\n with open('./%s/hist.json' % (warm_start_model), 'r') as 
f:\n history = json.load(f)\n try:\n trained_epoch = int(history['epochs'][-1])\n if np.isnan(trained_epoch):\n trained_epoch = int(history['epochs'][-2])\n except:\n trained_epoch = len(list(history.values())[0])\n epochs += trained_epoch\n epoch = initial_epoch+trained_epoch\n self.load(warm_start_model)\n self.log.info('Load %d epoch trained weights from %s' % (trained_epoch, warm_start_model))\n else:\n epoch = initial_epoch\n\n do_validation = bool(validation_data)\n for model in self.parallel_train_models.values():\n model._make_train_function()\n if do_validation:\n for model in self.parallel_train_models.values():\n model._make_test_function()\n\n use_sequence_api = is_sequence(generator)\n if not use_sequence_api and use_multiprocessing and workers > 1:\n warnings.warn(\n UserWarning('Using a generator with `use_multiprocessing=True`'\n ' and multiple workers may duplicate your data.'\n ' Please consider using the `keras.utils.Sequence'\n ' class.'))\n\n # if generator is instance of Sequence and steps_per_epoch are not provided -\n # recompute steps_per_epoch after each epoch\n recompute_steps_per_epoch = use_sequence_api and steps_per_epoch is None\n\n if steps_per_epoch is None:\n if use_sequence_api:\n steps_per_epoch = len(generator)\n else:\n raise ValueError('`steps_per_epoch=None` is only valid for a'\n ' generator based on the '\n '`keras.utils.Sequence`'\n ' class. Please specify `steps_per_epoch` '\n 'or use the `keras.utils.Sequence` class.')\n\n val_use_sequence_api = is_sequence(validation_data)\n val_gen = (hasattr(validation_data, 'next') or\n hasattr(validation_data, '__next__') or\n val_use_sequence_api)\n if (val_gen and not val_use_sequence_api and\n not validation_steps):\n raise ValueError('`validation_steps=None` is only valid for a'\n ' generator based on the `keras.utils.Sequence`'\n ' class. 
Please specify `validation_steps` or use'\n ' the `keras.utils.Sequence` class.')\n\n # Prepare display labels.\n out_labels = self.metrics_names\n callback_metrics = out_labels + ['val_' + n for n in out_labels]\n\n # prepare callbacks\n callbacks = self.get_callbacks()\n \n self.history = cbks.History()\n _callbacks = [cbks.BaseLogger(\n stateful_metrics=self.metrics_names[1:])]\n if verbose:\n _callbacks.append(\n cbks.ProgbarLogger(\n count_mode='steps',\n stateful_metrics=self.metrics_names[1:]))\n _callbacks += (callbacks or []) + [self.history]\n callbacks = cbks.CallbackList(_callbacks)\n\n # TODO\n# # it's possible to callback a different model than self:\n# callback_model = model._get_callback_model()\n\n callback_model = self\n callbacks.set_model(callback_model)\n callbacks.set_params({\n 'epochs': epochs,\n 'steps': steps_per_epoch,\n 'verbose': verbose,\n 'do_validation': do_validation,\n 'metrics': callback_metrics,\n })\n \n self.tb_data = None\n \n ##############################################################\n # On train begin\n callbacks._call_begin_hook('train')\n self.on_train_begin(self.get_reference_images(generator))\n \n # Pretrain model\n# if self.pretrain: self.pretrain_fit(generator)\n ##############################################################\n \n enqueuer = None\n val_enqueuer = None\n\n try:\n if do_validation:\n val_data = validation_data\n# if val_gen and val_workers > 0:\n# # Create an Enqueuer that can be reused\n# val_data = validation_data\n# if is_sequence(val_data):\n# val_enqueuer = OrderedEnqueuer(\n# val_data,\n# use_multiprocessing=val_multiprocessing)\n# validation_steps = validation_steps or len(val_data)\n# else:\n# val_enqueuer = GeneratorEnqueuer(\n# val_data,\n# use_multiprocessing=val_multiprocessing)\n# val_enqueuer.start(workers=val_workers,\n# max_queue_size=max_queue_size)\n# val_enqueuer_gen = val_enqueuer.get()\n# elif val_gen:\n# val_data = validation_data\n# if is_sequence(val_data):\n# print('isinfinite true')\n# val_enqueuer_gen = iter_sequence_infinite(val_data)\n# validation_steps = validation_steps or len(val_data)\n# else:\n# val_enqueuer_gen = val_data\n# else:\n# # Prepare data for validation\n# if len(validation_data) == 2:\n# val_x, val_y = validation_data\n# val_sample_weight = None\n# elif len(validation_data) == 3:\n# val_x, val_y, val_sample_weight = validation_data\n# else:\n# raise ValueError('`validation_data` should be a tuple '\n# '`(val_x, val_y, val_sample_weight)` '\n# 'or `(val_x, val_y)`. 
Found: ' +\n# str(validation_data))\n# val_x, val_y, val_sample_weights = model._standardize_user_data(\n# val_x, val_y, val_sample_weight)\n# val_data = val_x + val_y + val_sample_weights\n# if model.uses_learning_phase and not isinstance(K.learning_phase(),\n# int):\n# val_data += [0.]\n for cbk in callbacks:\n cbk.validation_data = val_data\n\n\n if workers > 0:\n if use_sequence_api:\n enqueuer = OrderedEnqueuer(\n generator,\n use_multiprocessing=use_multiprocessing,\n shuffle=shuffle)\n else:\n enqueuer = GeneratorEnqueuer(\n generator,\n use_multiprocessing=use_multiprocessing)\n enqueuer.start(workers=workers, max_queue_size=max_queue_size)\n output_generator = enqueuer.get()\n else:\n if use_sequence_api:\n output_generator = iter_sequence_infinite(generator)\n else:\n output_generator = generator\n\n callbacks.model.stop_training = False\n \n if self.parallel_train_models == None:\n raise ValueError('No compiled training models')\n \n # Construct epoch logs.\n epoch_logs = {}\n while epoch < epochs:\n for model in self.parallel_train_models.values():\n model.reset_metrics()\n callbacks.on_epoch_begin(epoch)\n steps_done = 0\n batch_index = 0\n while steps_done < steps_per_epoch:\n# print('step start ---%d---' % steps_done)\n generator_output = next(output_generator)\n# print('generator done')\n \n if not hasattr(generator_output, '__len__'):\n raise ValueError('Output of generator should be '\n 'a tuple `(x, y, sample_weight)` '\n 'or `(x, y)`. Found: ' +\n str(generator_output))\n\n if len(generator_output) == 2:\n x, y = generator_output\n sample_weight = None\n elif len(generator_output) == 3:\n x, y, sample_weight = generator_output\n else:\n raise ValueError('Output of generator should be '\n 'a tuple `(x, y, sample_weight)` '\n 'or `(x, y)`. 
Found: ' +\n str(generator_output))\n if x is None or len(x) == 0:\n # Handle data tensors support when no input given\n # step-size = 1 for data tensors\n batch_size = 1\n elif isinstance(x, list):\n batch_size = x[0].shape[0]\n elif isinstance(x, dict):\n batch_size = list(x.values())[0].shape[0]\n else:\n batch_size = x.shape[0]\n # build batch logs\n \n# print('generator %s' % str(x[0].shape))\n \n batch_logs = {'batch': batch_index, 'size': batch_size}\n \n callbacks.on_batch_begin(batch_index, batch_logs)\n \n# print('before out')\n \n outs = self.train_on_batch(x, y,\n sample_weight=sample_weight,\n class_weight=class_weight,\n reset_metrics=False)\n\n outs = to_list(outs)\n \n# print(' outs[0]=%f'%outs[0])\n \n for l, o in zip(out_labels, outs):\n batch_logs[l] = o\n \n callbacks._call_batch_hook('train', 'end', batch_index, batch_logs)\n# generator.on_batch_end()\n \n batch_index += 1\n steps_done += 1\n \n# if workers > 0:\n# enqueuer.join_end_of_epoch()\n# print('step done ---%d---' % steps_done)\n# sys.stdout.flush()\n \n# ######################################################################################\n if callback_model.stop_training:\n break\n\n ######################################################################################\n # Epoch finished.\n# print('before join')\n# sys.stdout.flush()\n \n if workers > 0:\n enqueuer.join_end_of_epoch()\n \n# print('after join')\n# sys.stdout.flush()\n \n if (do_validation and should_run_validation(validation_freq, epoch)):\n # Note that `callbacks` here is an instance of\n # `keras.callbacks.CallbackList`\n if val_gen:\n val_outs = self.evaluate_generator(\n# val_enqueuer_gen,\n val_data,\n validation_steps,\n callbacks=callbacks,\n# callbacks=None,\n use_multiprocessing=False,\n workers=0)\n else:\n val_outs = self.evaluate(\n val_x, val_y,\n batch_size=batch_size,\n sample_weight=val_sample_weights,\n callbacks=callbacks,\n verbose=0)\n val_outs = to_list(val_outs)\n # Same labels assumed.\n for l, o in zip(out_labels, val_outs):\n epoch_logs['val_' + l] = o\n \n sys.stdout.flush()\n \n callback_data = val_data[0]\n for cbk in callbacks:\n cbk.validation_data = [callback_data]\n callbacks.on_epoch_end(epoch, epoch_logs)\n \n self.on_epoch_end(epoch)\n \n if callback_model.stop_training:\n break\n \n epoch += 1\n \n if use_sequence_api and workers == 0:\n generator.on_epoch_end()\n if val_gen: val_data.on_epoch_end()\n\n if recompute_steps_per_epoch:\n# if workers > 0:\n# enqueuer.join_end_of_epoch()\n\n # recomute steps per epochs in case if Sequence changes it's length\n steps_per_epoch = len(generator)\n\n # update callbacks to make sure params are valid each epoch\n callbacks.set_params({\n 'epochs': epochs,\n 'steps': steps_per_epoch,\n 'verbose': verbose,\n 'do_validation': do_validation,\n 'metrics': callback_metrics,\n })\n \n if save_frequency is not None:\n if epoch // save_frequency * save_frequency == epoch:\n self.save(filepath=self.model_save_dir, is_compile=False)\n self.save_history(epoch = epoch, verbose=False)\n ######################################################################################\n finally:\n try:\n if enqueuer is not None:\n enqueuer.stop()\n finally:\n if val_enqueuer is not None:\n val_enqueuer.stop()\n# tb_enqueuer_gen.stop()\n\n callbacks._call_end_hook('train')\n \n if self.best_model_save: self.load(self.model_save_dir)\n else: self.save(filepath=self.model_save_dir, is_compile=False)\n return self.history", "def load_training_data_generator(self) -> 
Generator[Tuple[List[np.ndarray], np.ndarray], None, None]:\n return self._load_generator(config.TRAIN_DIR, True)", "def batch_iter(input_data,batch_size):\r\n batch_ids,batch_mask,batch_segment,batch_label=[],[],[],[]\r\n for features in input_data:\r\n if len(batch_ids) == batch_size:\r\n yield batch_ids,batch_mask,batch_segment,batch_label\r\n batch_ids, batch_mask, batch_segment, batch_label = [], [], [], []\r\n\r\n batch_ids.append(features['input_ids'])\r\n batch_mask.append(features['input_mask'])\r\n batch_segment.append(features['segment_ids'])\r\n batch_label.append(features['label_ids'])\r\n\r\n if len(batch_ids) != 0:\r\n yield batch_ids, batch_mask, batch_segment, batch_label", "def build_validation_iterator(dataset_name, batch_size, prepro_fn):\n dataset, dataset_info = tfds.load(\n dataset_name,\n split=tfds.Split.VALIDATION,\n as_supervised=True,\n with_info=True\n )\n n_samples = dataset_info.splits['validation'].num_examples\n steps_per_epoch = int(math.ceil(n_samples / batch_size))\n if prepro_fn is not None:\n dataset = dataset.map(prepro_fn, num_parallel_calls=AUTOTUNE)\n\n # Batch\n batched_dataset = dataset.padded_batch(\n batch_size,\n get_output_shapes(dataset),\n padding_values=get_padding_values(get_output_types(dataset)),\n drop_remainder=False\n )\n return batched_dataset, steps_per_epoch", "def batch_generator(X, y, num_steps):\n batch_size, batch_length = X.shape\n num_batches = batch_length // num_steps\n for i in range(num_batches):\n yield (X[:, i*num_steps:(i+1)*num_steps],\n y[:, i*num_steps:(i+1)*num_steps])", "def train_generator(self, train, validation=None, epochs=20, class_weight=None):\n history = self.model.fit_generator(\n generator=train, validation_data=validation,\n epochs=epochs, shuffle=True, class_weight=class_weight)\n self.training_history.append(\n ({\"epochs\": epochs, \"class_weight\": class_weight}, history)\n )\n self.data_ids = {\n \"train\": train.dataset.labels,\n \"validation\": validation.dataset.labels if validation else [],\n }\n return history", "def batch_generator(self, num_epochs=1, shuffle=False):\n def parse_fn(tfrecord):\n return parse_mnist_tfrec(\n tfrecord, self.name, self.features_shape, True\n )\n dataset = tf.data.TFRecordDataset(\n self.filenames_list, compression_type=self.compression_type\n )\n if shuffle:\n dataset = dataset.shuffle(buffer_size=256)\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.map(parse_fn).prefetch(self.batch_size)\n dataset = dataset.batch(self.batch_size)\n iterator = dataset.make_one_shot_iterator()\n batch_features, batch_labels = iterator.get_next()\n return batch_features, batch_labels", "def get_batches(self, batch_size):\n if self.data.shape[0] % batch_size != 0:\n raise RuntimeError('num of data tuples is not a multiple of batch size')\n num_batch = self.data.shape[0] // batch_size\n for b in range(num_batch):\n yield self.data[b*batch_size:(b+1)*batch_size, :], \\\n self.target[b*batch_size:(b+1)*batch_size, :]", "def fit_generator(self, x, y, optimizer, loss_function, **kwargs):\n raise NotImplementError('NeuralNetwork.fit_one_batch')", "def _train_batch(self):\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n result = self._batch_iter(source, target, i)\n\n # yield\n yield result", "def on_batch_(\n self,\n super_callback: \"AllennlpWandbCallback\",\n trainer: \"GradientDescentTrainer\",\n batch_inputs: List[TensorDict],\n batch_outputs: List[Dict[str, Any]],\n batch_metrics: Dict[str, Any],\n epoch: int,\n batch_number: int,\n 
is_training: bool,\n is_primary: bool = True,\n batch_grad_norm: Optional[float] = None,\n **kwargs: Any,\n ) -> None:\n pass", "def build_generator(self):\n # label input\n in_label = Input(shape=(1,))\n # embedding for categorical input\n li = Embedding(self.n_classes, 50)(in_label)\n # linear multiplication\n n_nodes = 7 * 7\n li = Dense(n_nodes)(li)\n # reshape to additional channel\n li = Reshape((7, 7, 1))(li)\n # image generator input\n in_lat = Input(shape=(self.latent_dim,))\n # foundation for 7x7 image\n n_nodes = 128 * 7 * 7\n gen = Dense(n_nodes)(in_lat)\n gen = LeakyReLU(alpha=0.2)(gen)\n gen = Reshape((7, 7, 128))(gen)\n # merge image gen and label input\n merge = Concatenate()([gen, li])\n # upsample to 14x14\n gen = Conv2DTranspose(128, (4,4), strides=(2,2), padding='same')(merge)\n gen = LeakyReLU(alpha=0.2)(gen)\n # upsample to 28x28\n gen = Conv2DTranspose(128, (4,4), strides=(2,2), padding='same')(gen)\n gen = LeakyReLU(alpha=0.2)(gen)\n # output\n out_layer = Conv2D(1, (7,7), activation='tanh', padding='same')(gen)\n # define model\n self.g_model = Model([in_lat, in_label], out_layer)", "def data_generator(delta=1, batch_size=32):\n while True:\n yield generate_samples(delta=delta, n=batch_size)", "def get_batches(dirname,\n gen=keras.preprocessing.image.ImageDataGenerator(),\n shuffle=True,\n batch_size=1,\n target_size=(224, 224),\n class_mode=\"categorical\"):\n return gen.flow_from_directory(dirname,\n shuffle=shuffle,\n batch_size=batch_size,\n target_size=target_size,\n class_mode=class_mode)", "def gen_batches(data, batch_size):\n data = np.array(data)\n\n for i in range(0, data.shape[0], batch_size):\n yield data[i:i+batch_size]", "def get_training_and_validation_generators(data_file, batch_size, n_labels, training_keys_file, validation_keys_file,\n data_split=0.8, overwrite=False):\n training_list, validation_list = get_validation_split(data_file, data_split=data_split, overwrite=overwrite,\n training_file=training_keys_file,\n testing_file=validation_keys_file)\n training_generator = data_generator(data_file, training_list, batch_size=batch_size, n_labels=n_labels)\n validation_generator = data_generator(data_file, validation_list, batch_size=1, n_labels=n_labels)\n # Set the number of training and testing samples per epoch correctly\n num_training_steps = len(training_list)//batch_size\n num_validation_steps = len(validation_list)\n return training_generator, validation_generator, num_training_steps, num_validation_steps", "def _get_batch(self,\n X_train, \n Y_train):\n\n raw_data_length = len(X_train)\n\n # partition raw data into batches and stack them vertically in a data matrix\n batch_partition_length = raw_data_length // self.model_parameters.batch_size\n data_x = np.zeros([self.model_parameters.batch_size, \n batch_partition_length, \n self.model_parameters.input_dimension], \n dtype=np.float32)\n\n data_y = np.zeros([self.model_parameters.batch_size, \n batch_partition_length, \n self.model_parameters.n_classes], \n dtype=np.float32)\n #data_y = np.zeros([batch_size, n_classes], dtype=np.int32)\n \n for i in range(self.model_parameters.batch_size):\n data_x[i] = X_train[batch_partition_length * i:batch_partition_length * (i + 1), :]\n data_y[i] = Y_train[batch_partition_length * i:batch_partition_length * (i + 1),:]\n \n # further divide batch partitions into sequence_length for truncated backprop\n epoch_size = batch_partition_length // self.model_parameters.sequence_length\n\n for i in range(epoch_size):\n x = data_x[:, i * 
self.model_parameters.sequence_length:(i + 1) * self.model_parameters.sequence_length,:]\n y = data_y[:, i * self.model_parameters.sequence_length:(i + 1) * self.model_parameters.sequence_length,:]\n yield (x, y)", "def get_train_batch_generator(self, size):\n self.shuffle_train()\n while self.train_position + size < len(self.train):\n yield self.unzip_batch(self.train[self.train_position:self.train_position + size])\n self.train_position = self.train_position + size", "def on_batch(\n self,\n trainer: \"GradientDescentTrainer\",\n batch_inputs: List[TensorDict],\n batch_outputs: List[Dict[str, Any]],\n batch_metrics: Dict[str, Any],\n epoch: int,\n batch_number: int,\n is_training: bool,\n is_primary: bool = True,\n batch_grad_norm: Optional[float] = None,\n **kwargs: Any,\n ) -> None:\n super().on_batch(\n trainer,\n batch_inputs,\n batch_outputs,\n batch_metrics,\n epoch,\n batch_number,\n is_training,\n is_primary=is_primary,\n batch_grad_norm=batch_grad_norm,\n )\n\n for sub_callback in self.sub_callbacks:\n sub_callback.on_batch_(\n self,\n trainer,\n batch_inputs,\n batch_outputs,\n batch_metrics,\n epoch,\n batch_number,\n is_training,\n is_primary=is_primary,\n batch_grad_norm=batch_grad_norm,\n )", "def generate(batch, size=32):\n\n # Using the data Augmentation in traning data\n ptrain = 'data224/train'\n pval = 'data224/test'\n\n datagen1 = ImageDataGenerator(\n samplewise_center=True,\n samplewise_std_normalization=True,\n shear_range=0.2,\n zoom_range=0.2,\n rotation_range=90,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n\n datagen2 = ImageDataGenerator(samplewise_center=True,\n samplewise_std_normalization=True,)\n\n train_generator = datagen1.flow_from_directory(\n ptrain,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n validation_generator = datagen2.flow_from_directory(\n pval,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n count1 = 0\n for root, dirs, files in os.walk(ptrain):\n for each in files:\n count1 += 1\n\n count2 = 0\n for root, dirs, files in os.walk(pval):\n for each in files:\n count2 += 1\n\n return train_generator, validation_generator, count1, count2", "def train_generator_MLE(generator, dataloader, gen_opt, epochs):\n for epoch in range(epochs):\n print('epoch %d : \\n' % (epoch + 1), end='')\n sys.stdout.flush()\n total_loss = 0\n\n for i, (input_tensor, target_tensor) in enumerate(dataloader):\n\n input_tensor, target_tensor = input_tensor.to(DEVICE), target_tensor.to(DEVICE)\n\n gen_opt.zero_grad()\n\n loss, _, outputs = generator(input_tensor, target_tensor)\n loss.backward()\n gen_opt.step()\n\n total_loss += loss.data.item()\n\n if (i / dataloader.batch_size) % ceil(\n ceil(len(dataloader) / float(dataloader.batch_size)) / 10.) 
== 0: # roughly every 10% of an epoch\n print('.', end='')\n sys.stdout.flush()\n\n if i % SAVE_EVERY == 0:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': generator.state_dict(),\n 'optimizer': gen_opt.state_dict(),\n }, filename='generator.m')\n\n if i % EVALUATE_EVERY == 0:\n test_sentence = input_tensor[0, :]\n test_target_sentence = target_tensor[0, :]\n generated_sentence = outputs[0, :]\n\n real_test_sentence = dataloader.dataset.vocabulary.tokens_to_sent(test_sentence)\n real_target_sentence = dataloader.dataset.vocabulary.tokens_to_sent(test_target_sentence)\n\n generated_sentence = dataloader.dataset.vocabulary.tokens_to_sent(generated_sentence)\n\n print(real_test_sentence)\n print('>>')\n print(generated_sentence)\n print('==')\n print(real_target_sentence)\n print('-----------------------------')", "def generate_batch(self, batch_size, split=0):\n data_size = self.split_sizes[split]\n data_offset = self.data_index[split] + self.split_offset[split]\n\n # Variable batch size - ensure model can handle this\n batch_size = min(batch_size, data_size - self.data_index[split])\n\n batch = np.empty(batch_size, dtype=np.int32)\n labels = np.empty(batch_size, dtype=np.int32)\n\n batch[:] = self.data[data_offset: data_offset + batch_size, 0]\n labels[:] = self.data[data_offset: data_offset + batch_size, 1]\n\n self.data_index[split] += batch_size\n\n return batch, labels", "def generate_batch(self, batch_size, rand=None, *args, **kwargs):\n return [\n self.generate_datasets(rand, *args, **kwargs) for _ in range(batch_size)\n ]", "def get_batch_gen(self, split, config):\n config.augment_scale_anisotropic = True\n config.augment_scale_min = 0.9\n config.augment_scale_max = 1.1\n config.augment_noise = 0.001\n config.augment_color = 1.0\n config.augment_rotation = 'vertical'\n\n if split == 'training':\n config.augment_symmetries = [True, False, False]\n else:\n config.augment_symmetries = [False, False, False]\n\n if split == 'training':\n epoch_n = config.epoch_steps * config.batch_size\n elif split == 'validation':\n epoch_n = config.validation_size * config.batch_size\n elif split == 'test':\n epoch_n = config.validation_size * config.batch_size\n else:\n raise ValueError('Split argument in data generator should be \"training\", \"validation\" or \"test\"')\n\n # Initiate potentials for regular generation\n if not hasattr(self, 'potentials'):\n self.potentials = {}\n self.min_potentials = {}\n\n data_split = split\n\n # Reset potentials\n def reset_potentials():\n self.potentials[split] = []\n self.min_potentials[split] = []\n\n for i, tree in enumerate(self.input_trees[data_split]):\n self.potentials[split] += [np.random.rand(tree.data.shape[0]) * 1e-3]\n self.min_potentials[split] += [float(np.min(self.potentials[split][-1]))]\n\n reset_potentials()\n\n def spatially_regular_gen():\n for i in range(epoch_n):\n cloud_ind = int(np.argmin(self.min_potentials[split]))\n point_ind = np.argmin(self.potentials[split][cloud_ind])\n points = np.array(self.input_trees[data_split][cloud_ind].data, copy=False)\n center_point = points[point_ind, :].reshape(1, -1)\n noise = np.random.normal(scale=0.35, size=center_point.shape)\n pick_point = center_point + noise.astype(center_point.dtype)\n\n if config.in_radius > 0:\n input_inds = self.input_trees[split][cloud_ind].query_radius(pick_point, r=config.in_radius)[0]\n else:\n buffer = self.buffer+np.random.randint(0,self.buffer//4)\n if len(points) < self.npoint+buffer:\n input_inds = self.input_trees[split][cloud_ind].query(pick_point, 
k=len(points))[1][0]\n else:\n input_inds = self.input_trees[split][cloud_ind].query(pick_point, k=self.npoint+buffer)[1][0]\n\n input_inds = self.shuffle_idx(input_inds)\n input_inds = input_inds[:self.npoint]\n\n # Number collected\n n = input_inds.shape[0]\n if n == 0:\n # Reset potentials\n reset_potentials()\n return\n # Safe check for very dense areas\n\n # Update potentials\n dists = np.sum(np.square((points[input_inds] - pick_point).astype(np.float32)), axis=1)\n delta = np.square(1 - dists / np.max(dists))\n self.potentials[split][cloud_ind][input_inds] += delta\n self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind]))\n n = input_inds.shape[0]\n\n # Collect points and colors\n input_points = (points[input_inds] - pick_point).astype(np.float32)\n input_colors = self.input_colors[data_split][cloud_ind][input_inds]\n\n if split == 'test':\n input_labels = np.zeros(input_points.shape[0])\n else:\n input_labels = self.input_labels[data_split][cloud_ind][input_inds]\n input_labels = np.array([self.label_to_idx[l] for l in input_labels])\n\n if split in ['test', 'validation']:\n label_weights = np.zeros(input_points.shape[0])\n else:\n label_weights = self.label_weights[input_labels]\n\n if len(input_inds) < self.npoint:\n input_points, input_colors, input_inds, label_weights, input_labels = \\\n self.data_rep(input_points, input_colors, input_labels, input_inds, label_weights, self.npoint)\n\n # Add yield data\n if n > 0:\n yield input_points, np.hstack((input_colors, input_points + pick_point)), input_labels, \\\n [input_points.shape[0]], input_inds, cloud_ind, label_weights\n\n # Define the generator that should be used for this split\n gen_func = spatially_regular_gen\n\n # Define generated types and shapes\n gen_types = (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.int32, tf.float32)\n gen_shapes = ([self.npoint, 3], [self.npoint, 6], [self.npoint], [1], [self.npoint], [], [self.npoint])\n\n return gen_func, gen_types, gen_shapes", "def _build_batch_kwargs_generator(self, **kwargs):\n generator = instantiate_class_from_config(\n config=kwargs,\n runtime_environment={\"datasource\": self},\n config_defaults={\n \"module_name\": \"great_expectations.datasource.batch_kwargs_generator\"\n },\n )\n if not generator:\n raise ClassInstantiationError(\n module_name=\"great_expectations.datasource.batch_kwargs_generator\",\n package_name=None,\n class_name=kwargs[\"class_name\"],\n )\n\n return generator", "def next_batch(self, batch_size):\n raise NotImplementedError", "def generate_batch(\n batch, vocab: Dict[str, int]\n ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:\n input_unigrams = [DatasetLSTM.encode_sequence(b[0][0], vocab) for b in batch]\n input_bigrams = [DatasetLSTM.encode_sequence(b[0][1], vocab) for b in batch]\n input_unigrams = torch.tensor(input_unigrams)\n input_bigrams = torch.tensor(input_bigrams)\n labels = torch.tensor([b[1] for b in batch])\n return (input_unigrams, input_bigrams), labels", "def data_gen(\n v: int, batch: int, nbatches: int, device: torch.device = torch.device(\"cpu\")\n) -> Iterator[Batch]: # TODO bad name\n for i in range(nbatches):\n data = np.random.randint(1, v, size=(batch, 10))\n data[:, 0] = 1\n src: LongTensorType = torch.from_numpy(data)\n tgt: LongTensorType = torch.from_numpy(data)\n src, tgt = src.to(device), tgt.to(device)\n yield Batch(src, tgt, 0)", "def next(self):\n if self._curr_batch + 1 > self.num_batches:\n # no more batches in current iteration through data set so 
start\n # new epoch ready for another pass and indicate iteration is at end\n self.new_epoch()\n raise StopIteration()\n # create an index slice corresponding to current batch number\n batch_slice = slice(self._curr_batch * self.batch_size,\n (self._curr_batch + 1) * self.batch_size)\n inputs_batch = self.inputs[batch_slice]\n targets_batch = self.targets[batch_slice]\n self._curr_batch += 1\n return inputs_batch, targets_batch", "def train_batch_iter(self, batch_size, num_epochs):\n return self.batch_iter(0, batch_size, num_epochs)", "def generator(features, labels, batch_size):\n \n # Create empty arrays to contain batch of features and labels#\n batch_features = np.zeros((batch_size, 160, 320, 3))\n batch_labels = np.zeros((batch_size, 1))\n while True:\n for i in range(batch_size):\n # choose random index in features\n index = random.choice(range(len(features)))\n batch_features[i] = features[index]\n batch_labels[i] = labels[index]\n yield batch_features, batch_labels", "def _batch(self, batch_size):\n transform_or_spec = self._specs.get(\n 'transform_or_spec', self.transform_or_spec)\n if hasattr(transform_or_spec, '_batch'):\n transform_or_spec = transform_or_spec._batch(batch_size)\n return _DeferredTensorSpec(\n self._get_batched_input_spec(batch_size),\n transform_or_spec=transform_or_spec,\n dtype=self.dtype,\n shape=(None if self.shape is None\n else tf.TensorShape([batch_size]).concatenate(self.shape)),\n name=self.name,\n also_track_spec=self._also_track_spec)", "def gen(num_batches,\n batch_size,\n seq_width,\n min_len,\n max_len):\n for batch_num in range(num_batches):\n\n # All batches have the same sequence length\n seq_len = random.randint(min_len, max_len)\n seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))\n seq = Variable(torch.from_numpy(seq))\n\n # The input includes an additional channel used for the delimiter\n inp = Variable(torch.zeros(seq_len + 1, batch_size, seq_width + 1))\n inp[:seq_len, :, :seq_width] = seq\n inp[seq_len, :, seq_width] = 1.0 # delimiter in our control channel\n outp = seq.clone()\n\n yield batch_num+1, inp.float().to(params.device), outp.float().to(params.device)", "def build_generators(folder_path, train_batch_size, val_batch_size, height, width):\n train_image_generator, train_mask_generator = create_train_generator(folder_path,\n train_batch_size,\n (height, width),\n preprocessing_masks)\n val_image_generator, val_mask_generator = create_validation_generator(folder_path,\n val_batch_size,\n (height, width),\n preprocessing_masks)\n my_train_generator = my_image_mask_generator(train_image_generator, train_mask_generator)\n my_val_generator = my_image_mask_generator(val_image_generator, val_mask_generator)\n\n return my_train_generator, my_val_generator", "def get_train_iterator(self) -> Iterable[Batch]:\n if self._train_name not in self._datasets:\n raise ValueError(\"Training data not provided.\")\n return self.get_iterator(self._train_name)", "def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n #fake_image = [1.0 for _ in xrange(784)]\n fake_image = [1.0 for _ in range(784)]\n fake_label = 0\n #return 
[fake_image for _ in xrange(batch_size)], [\n # fake_label for _ in xrange(batch_size)]\n return [fake_image for _ in range(batch_size)], [\n fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def get_batch_size():\n return get_global_variable(GraphKeys.BATCH_SIZE)", "def batch_generator(training_data, sequence_length=15, window_size = 15):\n engine_ids = list(training_data[\"engine_id\"].unique())\n temp = training_data.copy()\n for id_ in engine_ids:\n indexes = temp[temp[\"engine_id\"] == id_].index\n traj_data = temp.loc[indexes]\n cutoff_cycle = max(traj_data['cycle']) - sequence_length - window_size + 1\n \n if cutoff_cycle<0:\n drop_range = indexes\n print(\"sequence_length + window_size is too large\")\n else:\n cutoff_cycle_index = traj_data['cycle'][traj_data['cycle'] == cutoff_cycle+2].index\n drop_range = list(range(cutoff_cycle_index[0], indexes[-1] + 1))\n \n temp.drop(drop_range, inplace=True)\n indexes = list(temp.index)\n del temp\n \n feature_number = training_data.shape[1]-3\n\n x_shape = (len(indexes), sequence_length, window_size, feature_number)\n x_batch = np.zeros(shape=x_shape, dtype=np.float32)\n y_shape = (len(indexes))\n y_batch = np.zeros(shape=y_shape, dtype=np.float32)\n\n alt_index = indexes[0]\n for batch_index, index in enumerate(indexes):\n y_batch[batch_index] = training_data.iloc[index+window_size-2+sequence_length,-1]\n \n\n \n if index-alt_index==1 and batch_index!=0:\n temp_window = training_data.iloc[index+sequence_length-1:index+sequence_length-1 + window_size, 2:-1].values.reshape(1,window_size,-1)\n x_batch[batch_index] = np.concatenate((x_batch[batch_index-1][1:],temp_window))\n else:\n for seq in range(sequence_length):\n x_batch[batch_index][seq] = training_data.iloc[index+seq:index+seq + window_size, 2:-1].values\n alt_index = index\n\n \n return x_batch, y_batch", "def batch_data(cls, train_data, train_labels, batch_size):\n for batch in range(int(np.ceil(train_data.shape[0] / batch_size))):\n start = batch_size * batch\n end = start + batch_size\n if end > train_data.shape[0]:\n yield batch, (train_data[start:train_data.shape[0]], \\\n train_labels[start:train_data.shape[0]])\n else:\n yield batch, (train_data[start:end], \\\n train_labels[start:end])", "def next_batch(self, batch_size):\n # Get batch\n assert(batch_size == 1)\n em, mask_list, seed_list = self.next_example(self.K)\n\n # Reshape for batch size 1\n em_batch = np.expand_dims(em, 0)\n mask_list = [np.expand_dims(m,0) for m in mask_list]\n \n return em_batch, mask_list", "def test_batch_generator(self, dir_name):\n input = np.zeros((self.batch_size, self.max_seq_len,\n self.embedding_size))\n seq_lengths = np.zeros((self.batch_size), dtype=np.intp)\n unique_counts = np.zeros((self.batch_size), dtype=np.intp)\n labels = np.zeros((self.batch_size), dtype=np.intp)\n i = 0\n\n fi = open(dir_name + \"all.txt\")\n sample_gen = self.dev_sample_generator(fi)\n self.load_embedding()\n\n for sequence, seq_length, unique_count, label in sample_gen:\n seq_lengths[i], labels[i], 
unique_counts[i] = seq_length, label, unique_count\n if seq_lengths[i] > self.max_seq_len:\n seq_lengths[i] = self.max_seq_len\n sequence = sequence[:seq_lengths[i]]\n input[i, 0:seq_lengths[i], :] = self.embedding[sequence, :]\n\n i += 1\n\n if i == self.batch_size:\n yield input, seq_lengths, unique_counts, labels\n input = np.zeros(\n (self.batch_size, self.max_seq_len,\n self.embedding_size)\n )\n i = 0\n\n if i < self.batch_size:\n yield input[:i, :, :], seq_lengths[:i], unique_counts[:i], labels[:i]\n\n fi.close()", "def get_batch(iterator, batch_size):\n while True:\n center_batch = np.zeros(batch_size, dtype = np.uint32)\n target_batch = np.zeros((batch_size, 1), dtype = np.uint32)\n for index in range(batch_size):\n center_batch[index], target_batch[index] = next(iterator)\n\n yield center_batch, target_batch", "def next(self):\n if self._curr_batch + 1 > self.num_batches:\n # no more batches in current iteration through data set so start\n # new epoch ready for another pass and indicate iteration is at end\n self.new_epoch()\n raise StopIteration()\n # create an index slice corresponding to current batch number\n batch_slice = slice(self._curr_batch * self.batch_size,\n (self._curr_batch + 1) * self.batch_size)\n inputs_batch = self.inputs[batch_slice]\n targets_batch = self.targets[batch_slice]\n # target_ids_global = self.target_ids[batch_slice]\n target_ids_batch = self.target_ids[batch_slice]\n self._curr_batch += 1\n\n batch_inputs, batch_target_ids, batch_targets = \\\n self.transform_batch(inputs_batch, target_ids_batch, targets_batch)\n\n return batch_inputs, batch_targets, batch_target_ids", "def get_validation_batch(self, batch_size):\n if batch_size > len(self.val_indices):\n return self.data_handler.slice_data(self.val_indices)\n else:\n return self.data_handler.slice_data(list(np.random.choice(self.val_indices, size=batch_size)))", "def batch_input_generator(a_walk, random_walk_length, window_size):\n seq_1 = [a_walk[j] for j in range(random_walk_length-window_size)]\n seq_2 = [a_walk[j] for j in range(window_size, random_walk_length)]\n return np.array(seq_1 + seq_2)", "def get_batches_train_valid(PM: path_manager.PathManager,\n image_augmentation_multiple=1,\n batch_size=1) -> (image.ImageDataGenerator, image.ImageDataGenerator):\n train_batches = get_batches_augmented(PM.train, image_augmentation_multiple, batch_size=batch_size)\n valid_batches = get_batches(PM.valid, shuffle=False, batch_size=batch_size)\n return train_batches, valid_batches", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in range(batch_size)], [\n fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n\n # Shuffle the data\n np.random.seed(0)\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in xrange(batch_size)], [\n fake_label 
for _ in xrange(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def default_generator(self,\n dataset,\n epochs=1,\n predict=False,\n deterministic=True,\n pad_batches=True):\n for epoch in range(epochs):\n if not predict:\n print('Starting epoch %i' % epoch)\n for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(\n batch_size=self.batch_size,\n deterministic=deterministic,\n pad_batches=pad_batches):\n\n feed_dict = dict()\n if y_b is not None and not predict:\n for index, label in enumerate(self.labels_fd):\n if self.mode == \"classification\":\n feed_dict[label] = to_one_hot(y_b[:, index])\n if self.mode == \"regression\":\n feed_dict[label] = y_b[:, index:index + 1]\n if w_b is not None:\n feed_dict[self.weights] = w_b\n # Transform SMILES string to integer vectors\n smiles_seqs = [self.smiles_to_seq(smiles) for smiles in ids_b]\n feed_dict[self.smiles_seqs] = np.stack(smiles_seqs, axis=0)\n yield feed_dict", "def create_window_generator(window, batch_size, train_x, train_y, test_x, test_y, prediction_mode):\n train_generator = k.preprocessing.sequence.TimeseriesGenerator(train_x, train_y,\n length=window,\n batch_size=batch_size)\n\n test_generator = k.preprocessing.sequence.TimeseriesGenerator(test_x, test_y,\n length=window,\n batch_size=batch_size)\n\n return train_generator, test_generator", "def fit_generator(self, generator, *args, **kwargs):\n self.model.fit_generator(\n generator,\n *args, **kwargs\n )", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in xrange(batch_size)], [\n fake_label for _ in xrange(batch_size)\n ]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def get_batches_fn(batch_size):\n # Shuffle training data\n rnd.shuffle(image_paths)\n # Loop through batches and grab images, yielding each batch\n for batch_i in range(0, samples_n, batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n # Re-size to image_shape\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n # Create \"one-hot-like\" labels by class\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), 
np.array(gt_images)", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n\n # Shuffle data\n np.random.seed(0)\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n\n end = self._index_in_epoch\n\n return self._images[start:end], self._labels[start:end]", "def callbacks(self, val_generator: DataGenerator) -> List[Callback]:\n if self.train_mode in [\"classifier\", \"both\"]:\n model = self.classifier\n model_dir = \"class_model\"\n elif self.train_mode == \"combined\":\n model = self.combined\n model_dir = \"com_model\"\n\n # Callback for evaluating the validation dataset\n eval_callback = EvalCallback(\n model=model, val_generator=val_generator, layers=self.n_blocks\n )\n\n # callback for saving the best model\n checkpoint_callback = ModelCheckpoint(\n f\"{model_dir}/{model_dir}\" + \"_{epoch:04d}_{val_acc:.4f}.h5\",\n monitor=\"val_acc\",\n verbose=0,\n save_best_only=True,\n save_weights_only=False,\n mode=\"max\",\n )\n\n \"Make sure checkpoint callback is after the eval_callback, dependency\"\n return [eval_callback, checkpoint_callback]", "def get_batches(arr, batch_size, seq_length):\n\n ## TODO: Get the number of batches we can make\n n_batches = int(arr.size / (batch_size * seq_length))\n\n ## TODO: Keep only enough characters to make full batches\n arr = arr[:(n_batches * batch_size * seq_length)]\n\n ## TODO: Reshape into batch_size rows\n arr = arr.reshape((batch_size, -1))\n\n ## TODO: Iterate over the batches using a window of size seq_length\n for n in range(0, arr.shape[1], seq_length):\n # The features\n x = arr[:, n:(n + seq_length)]\n # The targets, shifted by one\n y = np.zeros_like(x)\n try:\n y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n + seq_length]\n except IndexError:\n y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]\n yield x, y", "def generate_batches(data, labels, batch_size):\n for start in range(0, len(data), batch_size):\n yield Tensor(data[start:start+batch_size, ...]), Tensor(labels[start:start+batch_size, ...])", "def get_batches(path, gen=image.ImageDataGenerator(), shuffle=True, batch_size=8, class_mode='categorical'):\n return gen.flow_from_directory(path,\n target_size=(ROWS, COLS),\n class_mode=class_mode,\n shuffle=shuffle,\n batch_size=batch_size)", "def get_train_generators(cf, logger):\n config_file = os.environ[CONFIG_ENV_VAR]\n config = load_config(config_file)\n\n all_sections = find_all_subdir_sections(config)\n\n # separate into training and validation folds randomly\n fold_ratios = config[\"train_validation_splits\"]\n # rng = np.random.default_rng(seed=config[\"split_random_seed\"])\n # rng.shuffle(all_sections)\n rnd = random.Random(config[\"split_random_seed\"])\n rnd.shuffle(all_sections)\n split_idx = round(fold_ratios[0] * len(all_sections))\n train_sections = all_sections[:split_idx]\n val_sections = all_sections[split_idx:]\n\n logger.info(\n \"Loaded %d annotation sections, using %d train, %d val\"\n % (len(all_sections), len(train_sections), len(val_sections))\n )\n\n 
train_pipeline = create_data_gen_pipeline(\n train_sections, cf=cf, annotation_config=config, is_training=True\n )\n val_pipeline = create_data_gen_pipeline(\n val_sections, cf=cf, annotation_config=config, is_training=False\n )\n batch_gen = {\n \"train\": train_pipeline,\n \"val_sampling\": val_pipeline,\n \"n_val\": len(val_sections),\n }\n # batch_gen[\"val_patient\"] = create_data_gen_pipeline(\n # val_sections, cf=cf, annotation_config=config, is_training=False\n # )\n\n return batch_gen", "def generator(self):\n\n # Each thread gets its own randomized set of keys\n keys = self.loader.keys()\n\n while True:\n random.shuffle(keys)\n data_batch = []\n label_batch = []\n\n for key in keys:\n data = self.loader.get(key)\n s = StringIO(data)\n img = PIL.Image.open(s)\n img = img.resize((224, 224))\n img = img.convert('RGB')\n data_batch.append(np.array(img))\n\n label_str = self._classname_from_key(key)\n label_int = self._classname_to_label[label_str]\n label_arr = np.zeros(self.num_classes())\n label_arr[label_int] = 1 # one-hot encoding\n label_batch.append(label_arr)\n\n if len(data_batch) == 32: # batch size\n yield np.array(data_batch), np.array(label_batch)\n data_batch = []\n label_batch = []", "def generate_batch(self, x: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:\n\n batch_size, max_length = x.shape[0], x.shape[1]\n\n start_batch = tf.expand_dims(x[:, 0], -1)\n\n sampled_preds = tf.zeros([batch_size, 0, self.vocab_size])\n sampled_batch = start_batch\n\n self.G.reset_states()\n\n for _ in range(max_length):\n _, preds, start_batch = self.G(start_batch)\n\n sampled_preds = tf.concat([sampled_preds, preds], 1)\n sampled_batch = tf.concat([sampled_batch, start_batch], 1)\n\n sampled_batch = sampled_batch[:, 1:]\n\n return sampled_batch, sampled_preds", "def add_batch_kwargs_generator(self, name, class_name, **kwargs):\n kwargs[\"class_name\"] = class_name\n generator = self._build_batch_kwargs_generator(**kwargs)\n if \"batch_kwargs_generators\" not in self._datasource_config:\n self._datasource_config[\"batch_kwargs_generators\"] = {}\n self._datasource_config[\"batch_kwargs_generators\"][name] = kwargs\n\n return generator" ]
[ "0.62859094", "0.627278", "0.6238411", "0.6231443", "0.61449736", "0.6116078", "0.6102709", "0.6071425", "0.6071153", "0.60535413", "0.60365564", "0.6031602", "0.60167223", "0.59738314", "0.5962088", "0.59487414", "0.59423554", "0.5910736", "0.5903543", "0.5894708", "0.58606577", "0.583326", "0.58268565", "0.582496", "0.58238584", "0.5815553", "0.5812809", "0.5807285", "0.5804486", "0.5787141", "0.57768494", "0.57712734", "0.57675886", "0.5761158", "0.57438874", "0.5723172", "0.57219726", "0.57219344", "0.57199955", "0.57148176", "0.5714479", "0.56928164", "0.56855553", "0.5676121", "0.56628454", "0.56553453", "0.56333417", "0.56233114", "0.5604449", "0.55989784", "0.55987734", "0.559366", "0.55911666", "0.5565509", "0.55641145", "0.55584043", "0.5534624", "0.5532684", "0.5531296", "0.5523859", "0.5522713", "0.55192405", "0.55142224", "0.5513597", "0.55055416", "0.5500461", "0.54999566", "0.5495384", "0.54896736", "0.54846454", "0.5469572", "0.5467812", "0.54550785", "0.54529303", "0.545124", "0.54510814", "0.54458904", "0.54404926", "0.54400855", "0.5438715", "0.5431082", "0.542503", "0.5422957", "0.5414847", "0.5404157", "0.54030544", "0.5397544", "0.5397028", "0.5393887", "0.539286", "0.53858376", "0.53744113", "0.5365707", "0.5365409", "0.5360744", "0.5356191", "0.5349747", "0.53479385", "0.5340229", "0.5339469" ]
0.6673094
0
Design the model for which the loss is optimized
def build_model(self, input_shape, design_embedding, **kwargs): return design_embedding(input_shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_model(self):\n self.g12 = G12(conv_dim=self.g_conv_dim)\n init_weights(self.g12, init_type='normal')\n self.g21 = G21(conv_dim=self.g_conv_dim)\n init_weights(self.g21, init_type='normal')\n self.d1 = D1(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d1, init_type='normal')\n self.d2 = D2(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d2, init_type='normal')\n self.dreid = DSiamese(class_count=self.num_classes_market)\n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n dr_params = list(self.dreid.parameters())\n\n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n self.dr_optimizer = optim.Adam(dr_params, self.lr, [self.beta1, self.beta2])\n\n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()\n self.dreid.cuda()", "def build_model(self):\n states = layers.Input(shape=(self.state_size, ), name='states')\n\n \n # Kernel initializer with fan-in mode and scale of 1.0\n kernel_initializer = initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal', seed=None)\n\n # Add hidden layers\n net = layers.Dense(units=400, activation='elu', kernel_initializer=kernel_initializer)(states)\n net = layers.Dense(units=300, activation='elu', kernel_initializer=kernel_initializer)(net)\n\n # Add final output layer with sigmoid activation\n raw_actions = layers.Dense(units=self.action_size, activation='sigmoid', name='raw_actions', kernel_initializer=kernel_initializer)(net)\n\n\n\n # Scale outpout to proper range\n actions = layers.Lambda(lambda x: (x * self.action_range) + self.action_low,\n name='actions')(raw_actions)\n\n # Create Keras model\n self.model = models.Model(inputs=states, outputs=actions)\n\n # Define loss function using action value gradients\n action_gradients = layers.Input(shape=(self.action_size, ))\n\n #### Why this function ?? 
(Q value) gradients\n loss = K.mean(-action_gradients * actions)\n\n # Any other Loss\n\n optimizer = optimizers.Adam(lr=0.0001)\n updates_op = optimizer.get_updates(params=self.model.trainable_weights, loss=loss)\n self.train_fn = K.function(\n inputs=[self.model.input, action_gradients, K.learning_phase()],\n outputs=[],\n updates=updates_op)", "def compile(self, learning_rate, momentum):\n # Optimizer object\n if self.config.ADAMW:\n from nets.adamw import AdamW\n optimizer = AdamW(lr=learning_rate, decay=0.001, weight_decay=self.config.WEIGHT_DECAY, \n clipnorm=self.config.GRADIENT_CLIP_NORM)\n else:\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n \n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n # ************************* NOTE for 2 label dataset \n if self.config.HAVE_LABEL2:\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_class_loss2\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n else:\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (tf.reduce_mean(layer.output, keepdims=True) * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n # print (self.keras_model.metrics_names)\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (tf.reduce_mean(layer.output, keepdims=True) * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model._metrics_tensors.update({name: loss})\n # self.keras_model._compile_stateful_metrics_tensors.update({name: loss})\n # print (\"================\",self.keras_model._compile_stateful_metrics_tensors)", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['D_adv','D_cls', 'G_A','G_B', 'cycle_A','G_adv','reg','idt']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n visual_names_A = ['real_A','A','mask_A', 'fake_B','B','mask_B', 'rec_A']\n #visual_names_B = ['real_B', 'fake_A', 'rec_B']\n # if identity loss is used, we also visualize idt_B=G_A(B) ad idt_A=G_A(B)\n # if self.isTrain and self.opt.lambda_identity > 0.0:\n # visual_names_A.append('idt_B')\n # #visual_names_B.append('idt_A')\n\n # combine visualizations for A and B\n self.visual_names = visual_names_A #+ visual_names_B\n # specify the models you want to save to the disk. 
The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.\n \n\n # define networks (both Generators and discriminators)\n # The naming is different from those used in the paper.\n # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n self.netG_A=[]\n self.netG_B=[]\n self.netG_Amask=[]\n self.netG_Bmask=[]\n if self.isTrain:\n self.model_names += ['G_A', 'G_Amask', 'G_B', 'G_Bmask', 'D', 'Dadv']\n else: # during test time, only load Gs\n self.model_names = ['G_A', 'G_Amask', 'G_B', 'G_Bmask']\n for i in range(opt.num_class):\n tG_A, tG_Amask = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n \n self.netG_A.append(tG_A)\n self.netG_Amask.append(tG_Amask)\n tG_B, tG_Bmask = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netG_B.append(tG_B)\n self.netG_Bmask.append(tG_Bmask)\n\n self.netD= networks.define_D(opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids,opt.num_class)\n self.netDadv = networks.define_D(opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids, 1)\n \n\n if self.isTrain:\n if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels\n assert(opt.input_nc == opt.output_nc)\n # create image buffer to store previously generated images\n # self.fake_A_pool = ImagePool(opt.pool_size)\n # create image buffer to store previously generated images\n # self.fake_B_pool = ImagePool(opt.pool_size)\n # define loss functions\n # define GAN loss.\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionGAN_D = networks.GANLoss('multi-label').to(self.device)\n self.criterionCycle = torch.nn.L1Loss()\n self.criterionIdt = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizers_G=[]\n for i in range(opt.num_class):\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A[i].parameters(\n ), self.netG_B[i].parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) \n self.optimizers_G.append(self.optimizer_G)\n \n self.optimizer_D = torch.optim.Adam(self.netD.parameters(\n ), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers=self.optimizers_G+[self.optimizer_D]", "def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n 
model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = utils.multi_gpu_model(model, gpus=2)\n\n return model", "def create_model(input_dim):\n n = 64\n# global input_dim\n model = tensorflow.keras.models.Sequential()\n \n model.add(Dense(n, input_dim=input_dim, kernel_initializer='uniform'))\n model.add(BatchNormalization())\n# model.add(LeakyReLU(alpha=0.1))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(n, input_dim=input_dim, kernel_initializer='uniform'))\n model.add(BatchNormalization())\n# model.add(LeakyReLU(alpha=0.1))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n \n model.add(Dense(1, kernel_initializer='uniform',activation='linear'))\n model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mse', 'mae', 'accuracy']) \n #model.compile(loss=r2_keras, optimizer='adam', metrics=['mse', 'mae', 'accuracy', r2_keras]) \n return model", "def _create_model(self):\n ref = 0 if self.m_cfg['configs']['recursive'] else -1\n out_t, l_t, models = [], [], []\n in_t = [tf.keras.Input(batch_size=self.m_cfg['configs']['batch'],\n shape=self.m_cfg['configs']['patch'])]\n for level in np.arange(self.levels):\n if not self.m_cfg['configs']['recursive'] or not level:\n lat, res, layers = self._set_level_ops(in_t[-1], level)\n opt = self._inst_optimizer()\n self.opt += [opt]\n curr_layers = sum(layers, [])\n vars = sum(list(map(lambda l: l.variables, curr_layers)), [])\n self.vars.append(vars)\n elif self.m_cfg['configs']['recursive']:\n lat, res, layers = self._set_level_ops(in_t[-1], level, layers)\n\n out_t += [res]\n l_t += [lat]\n in_t += [tf.keras.layers.Subtract()([in_t[ref], out_t[-1]])]\n\n inputs, outputs = in_t[0], [in_t[:-1], l_t, out_t]\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.loss = Losses(self.m_cfg['configs']['loss']).value", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. 
Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def create_model(self, input_shape, num_actions, mode, args, model_name='q_network'):\n assert (mode in (\"linear\", \"duel\", \"dqn\"))\n with tf.variable_scope(model_name):\n input_data = Input(shape=input_shape, name=\"input\")\n if mode == \"linear\":\n # #version 4 elu:\n # flatten_hidden = Flatten(name=\"flatten\")(input_data)\n # FC_1 = Dense(512, activation='elu', name='FC1-elu')(flatten_hidden)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(FC_1)\n # FC_3 = Dense(512, activation='elu', name='FC3-elu')(FC_2)\n # FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n # output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n #version 4 elu:\n flatten_hidden = Flatten(name=\"flatten\")(input_data)\n FC_1 = Dense(1024, activation='elu', name='FC1-elu')(flatten_hidden)\n FC_2 = Dense(1024, activation='elu', name='FC2-elu')(FC_1)\n FC_3 = Dense(1024, activation='elu', name='FC3-elu')(FC_2)\n FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n else:\n if not (args.recurrent):\n # # # version 1:\n # h1 = Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\")(input_data)\n # h2 = Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\")(h1)\n # h3 = Convolution2D(64, (3, 3), strides=1, activation=\"relu\", name=\"conv3\")(h2)\n # context = Flatten(name=\"flatten\")(h3)\n\n # # version 2:\n # conv1 = Convolution2D(1, (5, 5), strides=1, activation=\"elu\", name=\"conv1\")(input_data)\n # flatten = Flatten(name=\"flatten\")(conv1)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(flatten)\n # context = Dense(512, activation='elu', name='FC4-elu')(FC_2)\n\n # version 3:\n conv1 = Convolution2D(32, (2, 2), strides=1, activation=\"relu\", name=\"conv1\")(input_data)\n flatten = Flatten(name=\"flatten\")(conv1)\n FC_2 = Dense(128, activation='relu', name='FC2-relu')(flatten)\n FC_3 = Dense(128, activation='relu', name='FC3-relu')(FC_2)\n context = Dense(128, activation='elu', name='FC4-elu')(FC_3)\n\n\n\n # else:\n # print('>>>> Defining Recurrent Modules...')\n # input_data_expanded = Reshape((input_shape[0], input_shape[1], input_shape[2], 1),\n # input_shape=input_shape)(input_data)\n # input_data_TimeDistributed = Permute((3, 1, 2, 4), input_shape=input_shape)(input_data_expanded)\n # h1 = TimeDistributed(Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\"), \\\n # input_shape=(args.num_frames, input_shape[0], input_shape[1], 1))(\n # input_data_TimeDistributed)\n # h2 = TimeDistributed(Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\"))(h1)\n # h3 = TimeDistributed(Convolution2D(64, (2, 2), strides=1, activation=\"relu\", name=\"conv3\"))(h2)\n # flatten_hidden = TimeDistributed(Flatten())(h3)\n # hidden_input = TimeDistributed(Dense(512, activation='relu', name='flat_to_512'))(flatten_hidden)\n # if not (args.a_t):\n # context = LSTM(512, return_sequences=False, stateful=False, input_shape=(args.num_frames, 512))(\n # hidden_input)\n # else:\n # if args.bidir:\n # hidden_input = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # all_outs = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # 
merge_mode='sum')(hidden_input)\n # else:\n # all_outs = LSTM(512, return_sequences=True, stateful=False,\n # input_shape=(args.num_frames, 512))(hidden_input)\n # # attention\n # attention = TimeDistributed(Dense(1, activation='tanh'))(all_outs)\n # # print(attention.shape)\n # attention = Flatten()(attention)\n # attention = Activation('softmax')(attention)\n # attention = RepeatVector(512)(attention)\n # attention = Permute([2, 1])(attention)\n # sent_representation = merge([all_outs, attention], mode='mul')\n # context = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(512,))(sent_representation)\n # # print(context.shape)\n\n if mode == \"dqn\":\n h4 = Dense(512, activation='elu', name=\"fc\")(context)\n output = Dense(num_actions, name=\"output\")(h4)\n # elif mode == \"duel\":\n # value_hidden = Dense(512, activation='relu', name='value_fc')(context)\n # value = Dense(1, name=\"value\")(value_hidden)\n # action_hidden = Dense(512, activation='relu', name='action_fc')(context)\n # action = Dense(num_actions, name=\"action\")(action_hidden)\n # action_mean = Lambda(lambda x: tf.reduce_mean(x, axis=1, keep_dims=True), name='action_mean')(\n # action)\n # output = Lambda(lambda x: x[0] + x[1] - x[2], name='output')([action, value, action_mean])\n model = Model(inputs=input_data, outputs=output)\n print(model.summary())\n return model", "def compile(self, optimizer = None, loss_func = None):\n if loss_func is None:\n #loss_func = mean_huber_loss\n loss_func = 'mse'\n if optimizer is None:\n optimizer = Adam(lr = self.learning_rate)\n # optimizer = RMSprop(lr=0.00025)\n with tf.variable_scope(\"Loss\"):\n state = Input(shape = (self.frame_height, self.frame_width, self.num_frames) , name = \"states\")\n action_mask = Input(shape = (self.num_actions,), name = \"actions\")\n qa_value = self.q_network(state)\n qa_value = merge([qa_value, action_mask], mode = 'mul', name = \"multiply\")\n qa_value = Lambda(lambda x: tf.reduce_sum(x, axis=1, keep_dims = True), name = \"sum\")(qa_value)\n\n #loss_func = losses.mean_squared_error\n self.final_model = Model(inputs = [state, action_mask], outputs = qa_value)\n self.final_model.compile(loss=loss_func, optimizer=optimizer)", "def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = 
[]\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)", "def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n 
dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'Feat', 'VGG', 'SSIM', 'PSNR']\n self.visual_names = ['fake_B', 'real_B']\n if self.isTrain:\n self.model_names = ['G', 'D']\n else:\n self.model_names = ['G']\n\n self.netG = generator.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids,\n not opt.no_transp_conv,\n opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,\n opt.n_blocks_local)\n\n if self.isTrain:\n self.netD = discriminator.define_D(opt.input_nc + opt.output_nc, opt.ndf, 'pix2pixHD_multiscale',\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids,\n not (opt.gan_mode == 'lsgan'), opt.num_D)\n\n self.criterionGAN = loss.GANLoss(opt.gan_mode, multiscale_D=opt.netD == 'pix2pixHD_multiscale').to(\n self.device)\n self.criterionVGG = loss.VGGLoss().to(self.device)\n self.criterionFeat = loss.FeatureMatchingLoss(opt.n_layers_D, opt.num_D)\n\n self.criterionSSIM = loss.SkimageLoss(partial(ssim, multichannel=True))\n self.criterionPSNR = loss.SkimageLoss(psnr)\n\n if opt.netG.startswith('pix2pixHD') and (opt.n_epochs_fix_global > 0):\n params_dict = dict(self.netG.named_parameters())\n netG_params = []\n for key, value in params_dict.items():\n if key.startswith('model' + str(opt.n_local_enhancers)):\n netG_params += [value]\n else:\n netG_params = self.netG.parameters()\n\n self.optimizer_G = torch.optim.Adam(netG_params, lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n if opt.load_pretrain:\n pretrained_path = '' if not self.isTrain else opt.load_pretrain\n self.load_network(self.netG, 'G', opt.epoch, pretrained_path)\n if self.isTrain:\n self.load_network(self.netD, 'D', opt.epoch, pretrained_path)\n\n self.real_A = None\n self.real_B = None\n self.fake_A = None\n self.fake_B = None\n self.loss_D_real = None\n self.loss_D_fake = None\n self.loss_D = None\n self.loss_G_GAN = None\n self.loss_Feat = None\n self.loss_VGG = None\n self.loss_G = None\n self.loss_SSIM = None\n self.loss_PSNR = None", "def optimizer(self, model: nn.Module) -> torch.optim.Optimizer: # type: ignore\n pass", "def build_model(self):\r\n self.images, self.labels = self.dataloader.get_model_inputs()\r\n\r\n model = SimpleModel(self.images, self.labels, output_dim=F.output_dim, scope='source_regressor')\r\n self.out, _ = model.get_model()\r\n self.get_loss()", "def build_model(self):\n import tensorflow as tf\n \n y = tf.nn.relu(tf.matmul(self.variables[\"input_observation\"], self.variables[\"W1\"]) + \n self.variables[\"b1\"], name=\"y1\")\n \n for i in range(self.n_hidden-1):\n y = tf.nn.relu(tf.matmul(y, self.variables[\"W\"+str(i+2)]) + \n self.variables[\"b\"+str(i+2)], name=\"y\"+str(i+2))\n \n self.variables[\"y\"] = [tf.matmul(y, self.variables[\"Wo_0\"]) + self.variables[\"bo_0\"]]\n for i in range(1, len(self.output_size)):\n self.variables[\"y\"] += [tf.matmul(y, self.variables[\"Wo_%s\"%i]) + self.variables[\"bo_%s\"%i]]", "def build_model(self):\n self.G = Generator(self.g_conv_dim)\n self.D = Discriminator(self.d_conv_dim, self.c_dim)\n 
self.generator = Generator(self.g_conv_dim).train(False)\n\n self.G = nn.DataParallel(self.G)\n self.D = nn.DataParallel(self.D)\n\n # For Adam (Unofficial)\n # self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n # self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])\n\n # For RMSprop(Official)\n self.g_optimizer = torch.optim.RMSprop(self.G.parameters(), lr=0.0001)\n self.d_optimizer = torch.optim.RMSprop(self.D.parameters(), lr=0.0001)\n\n self.accumulate(self.generator, self.G.module, 0)\n # self.print_network(self.G, 'G')\n # self.print_network(self.D, 'D')\n \n self.G.to(self.device)\n self.D.to(self.device)\n self.generator.to(self.device)\n\n # weight init\n self.G.apply(self.weights_init)\n self.D.apply(self.weights_init)\n self.generator.apply(self.weights_init)", "def _build_model(self):\n\n with tf.variable_scope(self.name):\n # adds placeholders, data_normalization and data_noise if desired. Also adds a placeholder for dropout probability\n self.layer_in_x, self.layer_in_y = self._build_input_layers()\n\n # create core multi-layer perceptron\n mlp_output_dim = 2 * self.ndim_y * self.n_centers + self.n_centers\n core_network = MLP(\n name=\"core_network\",\n input_layer=self.layer_in_x,\n output_dim=mlp_output_dim,\n hidden_sizes=self.hidden_sizes,\n hidden_nonlinearity=self.hidden_nonlinearity,\n output_nonlinearity=None,\n weight_normalization=self.weight_normalization,\n dropout_ph=self.dropout_ph if self.dropout else None\n )\n\n core_output_layer = core_network.output_layer\n\n # slice output of MLP into three equally sized parts for loc, scale and mixture weights\n slice_layer_locs = L.SliceLayer(core_output_layer, indices=slice(0, self.ndim_y * self.n_centers), axis=-1)\n slice_layer_scales = L.SliceLayer(core_output_layer, indices=slice(self.ndim_y * self.n_centers, 2 * self.ndim_y * self.n_centers), axis=-1)\n slice_layer_weights = L.SliceLayer(core_output_layer, indices=slice(2 * self.ndim_y * self.n_centers, mlp_output_dim), axis=-1)\n\n # locations mixture components\n self.reshape_layer_locs = L.ReshapeLayer(slice_layer_locs, (-1, self.n_centers, self.ndim_y))\n self.locs = L.get_output(self.reshape_layer_locs)\n\n # scales of the mixture components\n reshape_layer_scales = L.ReshapeLayer(slice_layer_scales, (-1, self.n_centers, self.ndim_y))\n self.softplus_layer_scales = L.NonlinearityLayer(reshape_layer_scales, nonlinearity=tf.nn.softplus)\n self.scales = L.get_output(self.softplus_layer_scales)\n\n # weights of the mixture components\n self.logits = L.get_output(slice_layer_weights)\n self.softmax_layer_weights = L.NonlinearityLayer(slice_layer_weights, nonlinearity=tf.nn.softmax)\n self.weights = L.get_output(self.softmax_layer_weights)\n\n # # put mixture components together\n self.y_input = L.get_output(self.layer_in_y)\n self.cat = cat = Categorical(logits=self.logits)\n self.components = components = [MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale\n in zip(tf.unstack(self.locs, axis=1), tf.unstack( self.scales, axis=1))]\n self.mixture = mixture = Mixture(cat=cat, components=components, value=tf.zeros_like(self.y_input))\n\n # regularization\n self._add_softmax_entropy_regularization()\n self._add_l1_l2_regularization(core_network)\n\n # tensor to store samples\n self.samples = mixture.sample() #TODO either use it or remove it\n\n # tensor to compute probabilities\n if self.data_normalization:\n self.pdf_ = mixture.prob(self.y_input) / 
tf.reduce_prod(self.std_y_sym)\n self.log_pdf_ = mixture.log_prob(self.y_input) - tf.reduce_sum(tf.log(self.std_y_sym))\n else:\n self.pdf_ = mixture.prob(self.y_input)\n self.log_pdf_ = mixture.log_prob(self.y_input)\n\n # symbolic tensors for getting the unnormalized mixture components\n if self.data_normalization:\n self.scales_unnormalized = self.scales * self.std_y_sym\n self.locs_unnormalized = self.locs * self.std_y_sym + self.mean_y_sym\n else:\n self.scales_unnormalized = self.scales\n self.locs_unnormalized = self.locs\n\n # initialize LayersPowered --> provides functions for serializing tf models\n LayersPowered.__init__(self, [self.softmax_layer_weights, self.softplus_layer_scales, self.reshape_layer_locs,\n self.layer_in_y])", "def build_model(self):\n input_pencil = tf.keras.Input((128,128,3))\n # generator's output\n gen_image = self.gan_generator.model(input_pencil)\n # generator's output\n x = self.gan_discriminator.model([input_pencil,gen_image])\n model = tf.keras.Model(input_pencil,[x,gen_image])\n # compiling the model\n model.compile(loss=['hinge', 'mae'], optimizer = self.optimizer,loss_weights=[1,100], metrics=['accuracy'])\n self.model = model", "def define(self, optimizer = Adam(lr=1e-5)): \n \n self.optimizer = optimizer\n\n model = Sequential()\n\n #Layer 1\n model.add(Conv2D( filters = 96, \n kernel_size = (11,11), \n strides = 4, \n padding = 'same', \n activation = 'relu', \n input_shape = (224, 224, 3), \n kernel_initializer = 'he_normal'))\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None)) # overlapping pooling\n #Layer 2\n model.add(Conv2D( filters = 256, \n kernel_size = (5,5), \n strides = 1, \n padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None)) \n #Layer 3\n model.add(Conv2D( filters = 384, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', kernel_initializer = 'he_normal'))\n #Layer 4\n model.add(Conv2D( filters = 384, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n #Layer 5\n model.add(Conv2D( filters = 256, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n #Layer 6\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None))\n \n #Layer 7\n model.add(Flatten())\n \n #Layer 8\n model.add(Dense( units = 4096, activation = 'relu'))\n model.add(Dense( units = 1024, activation = 'relu'))\n model.add(Dense( units = 512, activation = 'relu'))\n model.add(Dense( units = 256, activation = 'relu'))\n model.add(Dense( units = 128, activation = 'relu'))\n \n #Layer end\n model.add(Dense( units = 3, activation = 'softmax'))\n model.summary()\n \n self.model = model", "def build_model(nx, layers, activations, lambtha, keep_prob):\n model = K.Sequential()\n for i in range(len(layers)):\n model.add(K.layers.Dense(layers[i],\n activation=activations[i],\n input_shape=(nx,),\n kernel_regularizer=K.regularizers.l2(lambtha)))\n if i + 1 < len(layers):\n model.add(K.layers.Dropout(1 - keep_prob))\n return model", "def __init__(self, conf):\n self.model_conf = conf[\"model\"]\n self.epochs = self.model_conf.getint(\"n_epochs\")\n self.epoch = self.model_conf.getint(\"epoch_start\")\n self.batch_size = self.model_conf.getint(\"batch_size\")\n self.criterion = 
nn.CrossEntropyLoss()\n self.device = torch.device(self.model_conf.get('device'))\n #self.model = (\n # eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n #)\n self.model = nn.DataParallel(\n eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n )\n total_params = sum(p.numel() for p in self.model.parameters())\n print(\"Created model {}: {} parameters\"\n .format(self.model_conf.get('name'), total_params))\n if self.model_conf.get(\"optim\") == 'SGD':\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n momentum=self.model_conf.getfloat(\"momentum\"),\n weight_decay=self.model_conf.getfloat(\"weight_decay\"))\n elif self.model_conf.get(\"optim\") == 'Adam':\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n betas=json.loads(self.model_conf.get(\"betas\")))\n else:\n raise ValueError('Only SGD is supported')\n\n if self.model_conf.get(\"checkpoint\") is not None:\n self.load_checkpoint(self.model_conf.get(\"checkpoint\"))\n\n self.checkpoints_path = conf.get(\"paths\", \"checkpoints\")\n self.results_path = conf.get(\"paths\", \"results\")\n self.best_accuracy = 0\n self.train_size = None\n self.valid_size = None\n self.iteration_print_freq = conf.getint(\"log\", \"iteration_print_freq\")", "def build_model(self) -> nn.Module:\n pass", "def createModel(self):\n outputs, inputs = baseUNet(self.input_shape,\n self.conv_depth,\n self.n_classes,\n self.init_w,\n self.dropout)\n \n if self.regression == True:\n outputs = Lambda(getPropOfGround)(outputs)\n \n model = Model(inputs = inputs,outputs = outputs)\n \n model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n if self.old_weights != None:\n model.set_weights(self.old_weights)\n self.model = model", "def __init__(self, model, h_units, weight_decay, dropout_rate, num_of_outputs, training_name):\n \n # inherit class constructor attributes from tf.keras.Model\n super(fc_model, self).__init__()\n \n # model name\n self.model_name = None\n \n # type of model architecture\n self.model = model\n \n # checkpoint directory\n self.checkpoint_dir = \"../Saved_Models/\" + training_name + \"_\" + \"best_models/\"\n \n # checkpoint filepath \n self.checkpoint_path = None\n \n # create intended number of dqn_block attributes\n self.block_1 = fc_block(h_units[0], weight_decay[0], dropout_rate[0])\n self.block_2 = fc_block(h_units[1], weight_decay[1], dropout_rate[1])\n self.block_3 = fc_block(h_units[2], weight_decay[2], dropout_rate[2])\n \n # create final output layer attribute \n if self.model == \"DDPG_Actor\":\n \n # output layer with continuous action for each joint\n self.outputs = tf.keras.layers.Dense(num_of_outputs, activation = 'tanh')\n \n elif self.model == \"DDPG_Critic\": \n\n # output layer is state-action value, Q, for a given state and action\n self.outputs = tf.keras.layers.Dense(num_of_outputs)", "def define_model():\n input_1 = Input(shape=(2,), name=\"input_1\")\n hidden_1 = Dense(units=20, activation='sigmoid')(input_1)\n output = Dense(units=1, activation='sigmoid')(hidden_1)\n model = Model(inputs=input_1, outputs=output)\n model.compile(loss='mse', optimizer='sgd', metrics=['accuracy'])\n print(' current model summary ')\n print(model.summary())\n return model", "def _compile_model(\n self,\n ):\n self.model.training_model.compile(\n loss=tf.keras.losses.mean_squared_error,\n optimizer='adam',\n metrics=[\n 
tf.keras.metrics.mean_squared_error,\n tf.keras.metrics.mean_absolute_error\n ],\n )", "def __init__(self, opt):\n BaseModel.__init__(self, opt) # call the initialization method of BaseModel\n\n self.opt = opt\n if opt.d_loss_mode == 'wgan' and not opt.use_gp:\n raise NotImplementedError('using wgan on D must be with use_gp = True.')\n\n self.loss_names = ['G_real', 'G_fake', 'D_real', 'D_fake', 'D_gp', 'G', 'D']\n self.visual_names = ['real_visual', 'gen_visual']\n\n if self.isTrain: # only defined during training time\n self.model_names = ['G', 'D']\n else:\n self.model_names = ['G']\n\n if self.opt.cgan:\n probs = np.ones(self.opt.cat_num)/self.opt.cat_num \n self.CatDis = Categorical(torch.tensor(probs))\n\n # define networks \n self.netG = networks.define_G(opt.z_dim, opt.output_nc, opt.ngf, opt.netG,\n opt.g_norm, opt.cgan, opt.cat_num, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc\n self.netD = networks.define_D(opt.input_nc, opt.ndf, opt.netD,\n opt.d_norm, opt.cgan, opt.cat_num, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # only defined during training time\n # define G mutations \n self.G_mutations = []\n for g_loss in opt.g_loss_mode: \n self.G_mutations.append(networks.GANLoss(g_loss, 'G', opt.which_D).to(self.device))\n # define loss functions\n self.criterionD = networks.GANLoss(opt.d_loss_mode, 'D', opt.which_D).to(self.device)\n # initialize optimizers\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr_g, betas=(opt.beta1, opt.beta2))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr_d, betas=(opt.beta1, opt.beta2))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n \n # Evolutinoary candidatures setting (init) \n\n self.G_candis = [] \n self.optG_candis = [] \n self.last_evaly = []\n self.last_evalimgs = []\n self.min_Fq = 100.0\n self.max_Fq = -100.0\n self.min_Fd = 100.0\n self.max_Fd = -100.0\n self.normFq = lambda f : (f-self.min_Fq) / (self.max_Fq-self.min_Fq)\n self.normFd = lambda f : (f-self.min_Fd) / (self.max_Fd-self.min_Fd)\n for i in range(opt.candi_num): \n self.G_candis.append(copy.deepcopy(self.netG.state_dict()))\n self.optG_candis.append(copy.deepcopy(self.optimizer_G.state_dict()))\n \n # visulize settings \n self.N =int(np.trunc(np.sqrt(min(opt.batch_size, 64))))\n if self.opt.z_type == 'Gaussian': \n self.z_fixed = torch.randn(self.N*self.N, opt.z_dim, 1, 1, device=self.device) \n elif self.opt.z_type == 'Uniform': \n self.z_fixed = torch.rand(self.N*self.N, opt.z_dim, 1, 1, device=self.device)*2. - 1. 
\n if self.opt.cgan:\n yf = self.CatDis.sample([self.N*self.N])\n self.y_fixed = one_hot(yf, [self.N*self.N, self.opt.cat_num])\n\n # the # of image for each evluation\n self.eval_size = max(math.ceil((opt.batch_size * opt.D_iters) / opt.candi_num), opt.eval_size)", "def _build_model(self):\n\n # Placeholders for our input\n # Our input are FRAMES_STATE RGB frames of shape of the gridworld\n self.X_pl = tf.placeholder(shape=[None, self.x_size, self.y_size,\n self.frames_state]\n , dtype=tf.uint8, name=\"X\")\n # The TD target value\n self.y_pl = tf.placeholder(shape=[None], dtype=tf.float32, name=\"y\")\n # Integer id of which action was selected\n self.actions_pl = tf.placeholder(shape=[None], dtype=tf.int32, name=\"actions\")\n\n X = tf.to_float(self.X_pl) / 255.0\n batch_size = tf.shape(self.X_pl)[0]\n\n # NETWORK ARCHITECTURE\n # tf.contrib.layers.conv2d(input, num_outputs, kernel_size, stride)\n conv1 = tf.contrib.layers.conv2d(X, 64, 2, 1, activation_fn=tf.nn.relu)\n # try with padding = 'VALID'\n # pool1 = tf.contrib.layers.max_pool2d(conv1, 2)\n # conv2 = tf.contrib.layers.conv2d(pool1, 32, WX, 1, activation_fn=tf.nn.relu)\n\n # Fully connected layers\n flattened = tf.contrib.layers.flatten(conv1)\n fc1 = tf.contrib.layers.fully_connected(flattened, 64)\n self.predictions = tf.contrib.layers.fully_connected(fc1, self.actions_num)\n\n # Get the predictions for the chosen actions only\n gather_indices = tf.range(batch_size) * tf.shape(self.predictions)[1] + self.actions_pl\n self.action_predictions = tf.gather(tf.reshape(self.predictions, [-1]), gather_indices)\n\n # Calcualte the loss\n self.losses = tf.squared_difference(self.y_pl, self.action_predictions)\n self.loss = tf.reduce_mean(self.losses)\n\n # Optimizer Parameters from original paper\n self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)\n self.train_op = self.optimizer.minimize(self.loss, global_step=tf.train.get_global_step())", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n\n self.loss_names = ['G_SH']\n self.visual_names = ['input', 'pr_SH', 'gt_SH']\n self.model_names = ['G1']\n\n if not opt.no_brightness:\n self.loss_names += ['G_BA', 'G_BC']\n self.visual_names += ['pr_BA', 'gt_BA']\n self.model_names += ['G3']\n\n if opt.reg_LTM:\n self.loss_names += ['LTMReg']\n\n self.light_res = opt.light_res\n\n\n # Intrinsic network\n if opt.latent_Ls or opt.latent_Lt:\n netG1name = 'unet_256_latent_inL'\n else:\n netG1name = 'unet_256_latent'\n\n input_nc = opt.input_nc\n if opt.in_Ls:\n input_nc += 1\n if opt.in_Lt:\n input_nc += 1\n\n if opt.LTM:\n self.dim_LTM = self.light_res**2\n if self.opt.enc_LTM:\n self.dim_LTM = opt.dim_LTM\n use_hidden = True if not opt.enc_ill_hid==-1 else False\n self.enc_LTM = networks.init_net(networks.IlluminationEncoder(self.light_res**2, opt.enc_ill_hid, self.dim_LTM, use_hidden), opt.init_type, opt.init_gain, self.gpu_ids)\n\n self.netG1 = networks.define_G(input_nc, self.dim_LTM, opt.ngf, netG1name, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, True, self.gpu_ids)\n\n else:\n if opt.no_latent_color:\n output_nc = 3\n else:\n output_nc = 1\n self.netG1 = networks.define_G(input_nc, output_nc, opt.ngf, netG1name, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, False, self.gpu_ids)\n\n # Brightness network\n g3_input_nc = 3\n if opt.cas and opt.cat_In:\n g3_input_nc = g3_input_nc + opt.input_nc\n if not opt.cas:\n if opt.in_Ls:\n g3_input_nc += 1\n if opt.in_Lt:\n g3_input_nc += 1\n self.netG3 = networks.define_G(g3_input_nc, 1, 
opt.ngf, 'resnet_9blocks_latent', opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, False, self.gpu_ids)\n if self.isTrain:\n # define loss functions\n self.criterionS = torch.nn.MSELoss()\n self.criterionBA = torch.nn.MSELoss()\n # self.criterionBP = torch.nn.MSELoss()\n self.criterionBC = torch.nn.MSELoss()\n self.criterionReg = torch.nn.MSELoss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G1 = torch.optim.Adam(self.netG1.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n # self.optimizer_G2 = torch.optim.Adam(self.netG2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_G3 = torch.optim.Adam(self.netG3.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G1)\n # self.optimizers.append(self.optimizer_G2)\n self.optimizers.append(self.optimizer_G3)", "def build_model():\n mdl = Sequential()\n\n # normalization\n mdl.add(Lambda(lambda x: x/128. - 1, input_shape=IMAGE_SHAPE, name=\"input\"))\n\n # trim image\n mdl.add(Lambda(lambda x: x[:, 10:-10, :, :]))\n\n # convolutions\n mdl.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(36, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(48, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Flatten())\n\n mdl.add(Dense(128, activation='relu'))\n mdl.add(Dense(64, activation='relu'))\n mdl.add(Dense(1, name=\"output\"))\n\n mdl.summary()\n\n return mdl", "def train(model, x_train, y_train, x_valid, y_valid, config):\n \n epochs = config['epochs']\n threshold = config['early_stop_epoch']\n alpha = config['learning_rate']\n# val_loss = 10000*np.ones((epochs,1))\n beta = config['momentum_gamma']\n batch_size = config['batch_size']\n \n N = x_train.shape[0]\n num_batches = int((N+batch_size -1 )/ batch_size)\n \n best_weight = []\n best_epoch = []\n best_bias = []\n #print(len(model.layers))\n train_loss_list = []\n \n train_acc_list = []\n val_acc_list = []\n val_loss_list = []\n \n counter = 0\n \n lam = 0.0001\n \n \n for i in range(1, epochs+1):\n shuffled_indices = np.random.permutation(range(N))\n \n for batch in range(num_batches):\n minibatch_indices = shuffled_indices[batch_size*batch:min(batch_size*(batch+1), N)]\n #print(len(minibatch_indices))\n xbatch = x_train[minibatch_indices, :]\n ybatch = y_train[minibatch_indices, :]\n #print(ybatch.shape)\n y, loss = model(xbatch, ybatch)\n \n model.backward() \n #weight update and storing\n for k in range(0, len(config['layer_specs']), 2):\n mom_w = -model.layers[k].d_v_w * beta + alpha*(model.layers[k].d_w + lam*model.layers[k].w )\n mom_b = -model.layers[k].d_v_b * beta + alpha*(model.layers[k].d_b + lam*model.layers[k].b )\n model.layers[k].w = model.layers[k].w - (mom_w )\n model.layers[k].b = model.layers[k].b - (mom_b )\n model.layers[k].d_v_w = -mom_w\n model.layers[k].d_v_b = -mom_b \n\n y, loss = model(x_train, y_train) \n 
train_loss_list.append(loss)\n \n train_pred = np.argmax(y, axis=1) \n acc = np.mean(np.argwhere(y_train==1)[:,1]==train_pred) \n \n train_acc_list.append(acc)\n \n \n #print(\"Training acc for epoch \", i, \" is:\\n\", acc) \n #print(\"Training loss for epoch \", i, \" is:\\n\", loss) \n val_y, val_loss = model(x_valid, y_valid)\n val_loss_list.append(val_loss)\n\n val_pred = np.argmax(val_y, axis=1) \n acc = np.mean(np.argwhere(y_valid==1)[:,1]==val_pred) \n val_acc_list.append(acc)\n \n #print(\"Validation acc for epoch \", i, \" is:\\n\", acc) \n #print(\"Validation loss for epoch \", i, \" is:\\n\", val_loss)\n if(i>1 and val_loss <min(val_loss_list[:-1])):\n #update best weights\n counter = 0\n weight = []\n bias = []\n for k in range(0, len(config['layer_specs']), 2):\n weight.append(model.layers[k].w)\n bias.append(model.layers[k].b)\n best_weight = weight \n best_bias = bias\n best_epoch = i\n else:\n counter +=1\n \n if counter > threshold:\n print(\"best epoch:\", best_epoch)\n break\n\n# if(i>=6 and val_loss[i-1]>=val_loss[i-2] and val_loss[i-2]>=val_loss[i-3]and val_loss[i-3]>=val_loss[i-4]and val_loss[i-4]>=val_loss[i-5]and val_loss[i-5]>=val_loss[i-6]):\n# break\n \n print(len(best_weight))\n print('Epoch: ', i)\n #print(val_loss)\n p = 0\n for k in range(0, len(config['layer_specs']), 2):\n model.layers[k].w = best_weight[p]\n model.layers[k].b = best_bias[p]\n p = p + 1\n \n return train_loss_list, val_loss_list, train_acc_list, val_acc_list\n raise NotImplementedError(\"Train method not implemented\")", "def _model(self):\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(units=64, input_dim=self.state_size, activation=\"relu\"))\n\t\tmodel.add(Dense(units=32, activation=\"relu\"))\n\t\tmodel.add(Dense(units=16, activation=\"relu\"))\n\t\tmodel.add(Dense(units=8, activation=\"relu\"))\n\t\tmodel.add(Dense(self.action_size, activation=\"linear\"))\n\t\tmodel.compile(loss=\"mse\", optimizer=Adam(lr=self.learning_rate))\n\n\t\treturn model", "def _create_loss(self):\n\n with tf.name_scope(\"loss\"):\n \n # gini=(tf.nn.l2_loss( self.score))/100000\n gini = tf.losses.softmax_cross_entropy(self.score, 0*self.score)\n \n promo_prob=tf.reduce_sum(tf.multiply(self.score, self.cohort_weight),\n axis=1)\n inc_value = tf.reduce_mean(tf.multiply(promo_prob, self.value))- self.control_value\n inc_cost = tf.reduce_mean( tf.multiply(promo_prob, self.cost)) - self.control_cost\n \n\n\n # determine loss function based on self.obj_rule\n if self.obj_rule == 'cpiv':\n self.objective = inc_cost / inc_value\n\n elif self.obj_rule == 'ivc':\n # maximize ivc\n self.objective = - inc_value / inc_cost\n\n elif self.obj_rule == 'lagrangian':\n assert self.shadow is not None, 'Need to pass in shadow value if use lagrangian as obj_rule.'\n self.objective = inc_cost - self.shadow * inc_value\n\n elif self.obj_rule == 'value':\n # maximize delta values\n self.objective = - inc_value\n\n # use only cost as objective\n elif self.obj_rule == 'cost':\n # maximize delta cost\n self.objective = - inc_cost\n\n else:\n raise Exception('Invalid obj_rule!')\n\n # regularization\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n # weights = tf.trainable_variables() # all vars of your graph\n # reg_loss = tf.norm( weights,ord=1)\n\n # final loss\n self.loss = self.objective +reg_loss+.1*gini", "def train_model(model, loss_fn, optimizer, train_generator, dev_generator, EXP):\n prev_loss = np.Infinity\n prev_acc = 0\n trained_model = model # to hold best model\n epochs = 150\n every = 
1\n train_loss_track = []\n dev_loss_track = []\n train_acc_track = []\n dev_acc_track = []\n\n train_num_batch = 480/30\n num_batch = 60/30\n for epoch in range(epochs):\n\n training_loss, training_accuracy = 0.0, 0.0\n train_gold = []\n train_pred = []\n # Set network into train set\n model.train()\n hidden = None\n for batch_x, batch_y in train_generator:\n # reset optimizer\n optimizer.zero_grad()\n # Predict outputs\n batch_x = batch_x.permute(1, 0, 2)\n outputs = model(batch_x)\n\n # Calculate the loss\n train_gold.extend(batch_y.cpu().detach().numpy())\n train_pred.extend(outputs.argmax(1).cpu().detach().numpy())\n loss = loss_fn(outputs, batch_y)\n # Backward and update step\n loss.backward()\n optimizer.step()\n\n training_loss += loss.detach().item()\n training_loss = training_loss/train_num_batch\n\n train_accuracy = accuracy_score(train_gold, train_pred)\n print('Epoch: ' + str(epoch) + ', Total train Loss: ' + str(training_loss)\n + ', Total train accu: ' + str(round(train_accuracy * 100, 2)) + \"%\")\n train_loss_track.append(training_loss)\n train_acc_track.append(train_accuracy)\n\n if epoch % every == 0:\n # Set network into development set\n val_gold = []\n val_pred = []\n dev_loss, dev_accuracy = 0.0, 0.0\n with torch.no_grad(): # set not gradient\n model.eval()\n # optimizer.zero_grad()\n\n for batch_x, batch_y in dev_generator:\n batch_x = batch_x.permute(1, 0, 2)\n outputs = model(batch_x)\n\n # Add predictions and gold labels\n val_gold.extend(batch_y.cpu().detach().numpy())\n val_pred.extend(outputs.argmax(1).cpu().detach().numpy())\n\n dev_loss += loss_fn(outputs.double(), batch_y.long()).detach().item()\n\n dev_accuracy = accuracy_score(val_gold, val_pred)\n f1 = f1_score(val_gold, val_pred, average='macro')\n dev_loss = dev_loss/num_batch\n print('Dev Epoch: ' + str(epoch) + ', Total dev Loss: ' + str(dev_loss)\n + ', Total dev accu: ' + str(round(dev_accuracy*100, 3)) + \"%\")\n\n if dev_accuracy > prev_acc:\n print(f\"saving model... 
loss: {dev_loss}\")\n # prev_loss = dev_loss\n prev_acc = dev_accuracy\n trained_model = model\n torch.save(trained_model, f\"./models/best_model_{EXP}.pth\")\n dev_loss_track.append(dev_loss)\n dev_acc_track.append(dev_accuracy)\n tracks = pd.DataFrame()\n tracks['train_loss'] = train_loss_track\n tracks['train_acc'] = train_acc_track\n tracks['dev_loss'] = dev_loss_track\n tracks['dev_acc'] = dev_acc_track\n\n print(tracks)\n tracks.to_csv(f\"history_{EXP}.csv\")\n pickle.dump(tracks, open(f\"history_{EXP}.pkl\", 'wb'), protocol=4)\n\n return trained_model", "def create_model():\n model = Sequential()\n\n model.add(Lambda(lambda x: x/127.5-1.0, input_shape=INPUT_SHAPE))\n\n # Now we are going to add some Convulation Layers identical to paper\n\n model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n\n # And now finally we will Flatten our layers and eventually use Fully Connected Layers to reduce features.\n\n model.add(Dropout(0.4))\n model.add(Flatten())\n\n model.add(Dense(256, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(100, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(25, activation='elu'))\n model.add(Dense(1))\n\n model.summary()\n\n return model", "def create_org_model( width=28, \r\n height=28, channel=1, verbose=True,epochs=10):\r\n input1 = Input(\r\n shape=(\r\n width,\r\n height,\r\n channel,\r\n ), name='concat_input')\r\n conv1 = Conv2D(32, kernel_size=5, activation='relu', padding='same')\r\n conv2 = Conv2D(32, kernel_size=5, activation='relu', padding='same')\r\n conv3 = Conv2D(64, kernel_size=3, activation='relu', padding='same')\r\n conv4 = Conv2D(64, kernel_size=3, activation='relu', padding='same')\r\n dense1 = Dense(256, activation='relu')\r\n predict = Dense(10, activation='softmax')\r\n\r\n conv1o = conv1(input1)\r\n conv2o = conv2(conv1o)\r\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv2o)\r\n drop1 = Dropout(.25)(pool1)\r\n conv3o = conv3(drop1)\r\n conv4o = conv4(conv3o)\r\n pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2,2))(conv4o)\r\n drop2 = Dropout(.25)(pool2)\r\n drop2f = Flatten()(drop2)\r\n fc1 = dense1(drop2f)\r\n softmax1 = predict(fc1)\r\n\r\n drop2_2 = Input(shape=(7,7,64), name='concat_input') \r\n drop2f_2 = Flatten()(drop2_2)\r\n fc1_2 = dense1(drop2f_2)\r\n softmax1_2 = predict(fc1_2)\r\n\r\n mlp = Model(input1, softmax1)\r\n optimizer = tf.keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)\r\n mlp.compile(\r\n loss='sparse_categorical_crossentropy',\r\n optimizer=optimizer,\r\n metrics=['accuracy'])\r\n\r\n\r\n mlp.load_weights(model_dir+'complete_model.h5')\r\n\r\n for layer in mlp.layers:\r\n layer.trainable = False\r\n\r\n feature_model = Model(input1, drop2)\r\n predict_model = Model(drop2_2, softmax1_2)\r\n\r\n return feature_model, predict_model, mlp", "def build_model(self,nn1=32,nn2=64,lr=0.01,dp=0.1,decay=1e-4,dn1=50,dn2=100):\n\n opt = Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, decay=self.decay)\n model = models.Sequential()\n model.add(Conv1D(filters=self.nn1, kernel_size=3, padding=\"same\", input_shape=(self.n_stp, self.n_feats)))\n model.add(PReLU(alpha_initializer=Constant(value=0.20)))\n 
model.add(Conv1D(filters=self.nn2, kernel_size=2, padding=\"same\"))\n model.add(MaxPool1D(pool_size=1))\n model.add(PReLU(alpha_initializer=Constant(value=0.20)))\n\n model.add(Dropout(self.dp))\n model.add(Flatten())\n model.add(Dense(self.dn1))\n model.add(PReLU(alpha_initializer=Constant(value=0.20)))\n model.add(Dense(self.dn2))\n model.add(PReLU(alpha_initializer=Constant(value=0.20)))\n model.add(Dense(1))\n model.add(Activation('relu'))\n\n model.compile(loss=\"mse\",\n optimizer=opt,\n metrics=[self.soft_acc])\n\n return model", "def create_model() -> Model:\n # Create a neural network model that includes several dense layers with hyperbolic tangent activations, L2 regularization, and batch normalization\n regularizer = l2(0)\n dropout = 0\n activation = 'tanh'\n model = Sequential([\n InputLayer(input_shape=(16,)),\n BatchNormalization(),\n Dense(12, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(8, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(1, kernel_regularizer=regularizer)\n ])\n # Output a summary of the model's architecture\n print(model.summary())\n # Use a mean squared error loss function and an Adam optimizer; do not print accuracy because this is a regression task\n model.compile(\n optimizer='adam',\n loss='mse',\n metrics=['mae']\n )\n # Return the untrained model\n return model", "def build(self):\n\n self.W_AA = self.init([self.n_atom_input_feat, self.n_hidden_AA])\n self.b_AA = model_ops.zeros(shape=[\n self.n_hidden_AA,\n ])\n\n self.W_PA = self.init([self.n_pair_input_feat, self.n_hidden_PA])\n self.b_PA = model_ops.zeros(shape=[\n self.n_hidden_PA,\n ])\n\n self.W_A = self.init([self.n_hidden_A, self.n_atom_output_feat])\n self.b_A = model_ops.zeros(shape=[\n self.n_atom_output_feat,\n ])\n\n self.trainable_weights = [\n self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A\n ]\n if self.update_pair:\n self.W_AP = self.init([self.n_atom_input_feat * 2, self.n_hidden_AP])\n self.b_AP = model_ops.zeros(shape=[\n self.n_hidden_AP,\n ])\n\n self.W_PP = self.init([self.n_pair_input_feat, self.n_hidden_PP])\n self.b_PP = model_ops.zeros(shape=[\n self.n_hidden_PP,\n ])\n\n self.W_P = self.init([self.n_hidden_P, self.n_pair_output_feat])\n self.b_P = model_ops.zeros(shape=[\n self.n_pair_output_feat,\n ])\n\n self.trainable_weights.extend(\n [self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P])", "def _build_model_internal(self, opts):\n data_shape = self._data.data_shape\n\n # Placeholders\n real_points_ph = tf.placeholder(\n tf.float32, [None] + list(data_shape), name='real_points_ph')\n noise_ph = tf.placeholder(\n tf.float32, [None] + [opts['latent_space_dim']], name='noise_ph')\n is_training_ph = tf.placeholder(tf.bool, name='is_train_ph')\n lr_decay_ph = tf.placeholder(tf.float32)\n\n\n # Operations\n\n latent_x_mean, log_latent_sigmas = self.discriminator(\n opts, real_points_ph, is_training_ph)\n scaled_noise = tf.multiply(\n tf.sqrt(1e-6 + tf.exp(log_latent_sigmas)), noise_ph)\n loss_kl = 0.5 * tf.reduce_sum(\n tf.exp(log_latent_sigmas) +\n tf.square(latent_x_mean) -\n log_latent_sigmas, axis=1)\n if opts['recon_loss'] == 'l2sq':\n reconstruct_x = self.generator(opts, latent_x_mean + scaled_noise,\n is_training_ph)\n loss_reconstruct = tf.reduce_sum(\n tf.square(real_points_ph - reconstruct_x), axis=[1,2,3])\n loss_reconstruct = loss_reconstruct / 2. 
/ opts['vae_sigma']\n elif opts['recon_loss'] == 'cross_entropy':\n if opts['input_normalize_sym']:\n expected = (real_points_ph + 1.0) / 2.0\n else:\n expected = real_points_ph\n reconstruct_x_logits = self.generator(\n opts, latent_x_mean + scaled_noise,\n is_training_ph, return_logits=True)\n loss_reconstruct = tf.reduce_sum(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=expected, logits=reconstruct_x_logits),\n axis=[1,2,3])\n else:\n raise ValueError(\"Unknown recon loss value %s\" % opts['recon_loss'])\n dec_enc_x = self.generator(opts, latent_x_mean,\n is_training=False, reuse=True)\n\n loss_reconstruct = tf.reduce_mean(loss_reconstruct)\n loss_kl = tf.reduce_mean(loss_kl)\n loss = loss_kl + loss_reconstruct\n # loss = tf.Print(loss, [loss, loss_kl, loss_reconstruct], 'Loss, KL, reconstruct')\n optim = ops.optimizer(opts, decay=lr_decay_ph).minimize(loss)\n\n generated_images = self.generator(opts, noise_ph,\n is_training_ph, reuse=True)\n\n self._real_points_ph = real_points_ph\n self._noise_ph = noise_ph\n self._is_training_ph = is_training_ph\n self._optim = optim\n self._loss = loss\n self._loss_reconstruct = loss_reconstruct\n self._lr_decay_ph = lr_decay_ph\n self._loss_kl = loss_kl\n self._generated = generated_images\n self._reconstruct_x = dec_enc_x\n self._enc_mean = latent_x_mean\n self._enc_log_var = log_latent_sigmas\n\n saver = tf.train.Saver(max_to_keep=10)\n tf.add_to_collection('real_points_ph', self._real_points_ph)\n tf.add_to_collection('noise_ph', self._noise_ph)\n tf.add_to_collection('is_training_ph', self._is_training_ph)\n tf.add_to_collection('encoder_mean', self._enc_mean)\n tf.add_to_collection('encoder_log_sigma', self._enc_log_var)\n tf.add_to_collection('decoder', self._generated)\n\n self._saver = saver\n\n logging.error(\"Building Graph Done.\")", "def model(image_height,image_width,path):\n\n\tdef load_file(path='vgg19.mat'):\n\t\t\"\"\"\n\t\tLoads Weights File & returns Object of Numpy array\n\t\t\"\"\"\n\t\tfile=loadmat(path)\n\t\tfile=file['layers']\n\t\tprint(\"Success load_file\")\n\t\treturn file\n\n\tdef ret_layer_index(file):\n\t\t\"\"\"\n\t\tTakes file as input & returns a dictionary having name of layers with their code\n\t\t\"\"\"\n\t\tnames={}\n\t\tfor i in range(len(file[0])):\n\t\t\tprint(file[0][i][0][0][0])\n\t\t\tnames[file[0][i][0][0][0][0]]=i\n\t\tprint(\"Success layer_index\")\n\t\treturn names\n \n\tdef weight(layer_name):\n\t\t\"\"\" Asks for Layer Name & returns its weights & bias\n\t\t\"\"\"\n\t\tlayer_no=names[layer_name]\n\t\twb =file[0][layer_no][0][0][2]\n\t\tw=wb[0][0]\n\t\tb=wb[0][1]\n\t\tname=file[0][layer_no][0][0][0]\n\t\tassert name==layer_name\n\t\tprint(\"Success weight\")\n\t\treturn w,b\n\n\tdef conv_relu(prev_layer,layer_no,layer_name):\n\t\tW,b=weight(layer_name)\n\t\tW=tf.constant(W)\n\t\tb=tf.constant(np.reshape(b, (b.size)))\n\t\tl=tf.nn.conv2d(prev_layer,filter=W,strides=[1,1,1,1],padding='SAME') +b\n\t\tprint(\"Success convrelu\")\n\t\treturn tf.nn.relu(l)\n\n\tdef avg_pool(prev_layer):\n\t\treturn tf.nn.avg_pool(prev_layer,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1], padding='SAME')\n\n\tdef load_graph():\n\t\tgraph={}\n\t\tgraph['input'] = tf.Variable(np.zeros((1, image_height, image_width,3)), dtype = 'float32')\n\t\tgraph['conv1_1'] = conv_relu(graph['input'], 0, 'conv1_1')\n\t\tgraph['conv1_2'] = conv_relu(graph['conv1_1'], 2, 'conv1_2')\n\t\tgraph['avgpool1'] = avg_pool(graph['conv1_2'])\n\t\tgraph['conv2_1'] = conv_relu(graph['avgpool1'], 5, 'conv2_1')\n\t\tgraph['conv2_2'] = 
conv_relu(graph['conv2_1'], 7, 'conv2_2')\n\t\tgraph['avgpool2'] = avg_pool(graph['conv2_2'])\n\t\tgraph['conv3_1'] = conv_relu(graph['avgpool2'], 10, 'conv3_1')\n\t\tgraph['conv3_2'] = conv_relu(graph['conv3_1'], 12, 'conv3_2')\n\t\tgraph['conv3_3'] = conv_relu(graph['conv3_2'], 14, 'conv3_3')\n\t\tgraph['conv3_4'] = conv_relu(graph['conv3_3'], 16, 'conv3_4')\n\t\tgraph['avgpool3'] = avg_pool(graph['conv3_4'])\n\t\tgraph['conv4_1'] = conv_relu(graph['avgpool3'], 19, 'conv4_1')\n\t\tgraph['conv4_2'] = conv_relu(graph['conv4_1'], 21, 'conv4_2')\n\t\tgraph['conv4_3'] = conv_relu(graph['conv4_2'], 23, 'conv4_3')\n\t\tgraph['conv4_4'] = conv_relu(graph['conv4_3'], 25, 'conv4_4')\n\t\tgraph['avgpool4'] = avg_pool(graph['conv4_4'])\n\t\tgraph['conv5_1'] = conv_relu(graph['avgpool4'], 28, 'conv5_1')\n\t\tgraph['conv5_2'] = conv_relu(graph['conv5_1'], 30, 'conv5_2')\n\t\tgraph['conv5_3'] = conv_relu(graph['conv5_2'], 32, 'conv5_3')\n\t\tgraph['conv5_4'] = conv_relu(graph['conv5_3'], 34, 'conv5_4')\n\t\tgraph['avgpool5'] = avg_pool(graph['conv5_4'])\n\t\treturn graph\n\n\tfile=load_file(path)\n\tnames=ret_layer_index(file)\n\treturn load_graph()", "def model(name, conv_dropout_keep_prob=1.0, fc_dropout_keep_prob=1.0, reuse=None):\n with tf.variable_scope(name, reuse=reuse) as scope:\n # input format\n input_shape = [100, 100, 60] # can't actually get rid of this \n num_labels = 2\n\n # architecture\n k = 5\n depth_1 = 32\n depth_2 = 32\n \n k = 3\n depth_3 = 64\n depth_4 = 64\n \n fc_num_0 = conv_to_fc_size(input_shape, conv_depth=depth_4, pools=2)\n fc_num_1 = 32\n\n # regularizers\n reg_conv = tf.contrib.layers.l2_regularizer(scale=1e-6)\n reg_fc = tf.contrib.layers.l2_regularizer(scale=1e-6)\n\n # 2 convolution and pooling layers\n conv_1 = conv_relu_layer(x_batch,\n 5, 1, depth_1,\n regularizer=reg_conv,\n dropout_keep_prob=conv_dropout_keep_prob,\n name='conv_1')\n# conv_1 = maxpool_layer(conv_1, name='maxpool_1')\n\n conv_2 = conv_relu_layer(conv_1,\n 5, depth_1, depth_2,\n regularizer=reg_conv,\n dropout_keep_prob=conv_dropout_keep_prob,\n name='conv_2')\n conv_2 = maxpool_layer(conv_2, name='maxpool_2')\n\n conv_3 = conv_relu_layer(conv_2,\n 3, depth_2, depth_3,\n regularizer=reg_conv,\n dropout_keep_prob=conv_dropout_keep_prob,\n name='conv_3')\n\n conv_4 = conv_relu_layer(conv_3,\n 3, depth_3, depth_4,\n regularizer=reg_conv,\n dropout_keep_prob=conv_dropout_keep_prob,\n name='conv_4')\n \n conv_4 = maxpool_layer(conv_4, name='maxpool_2')\n\n # 1 fully connected layer\n fc_1 = fc_layer(conv_4, \n fc_num_0, fc_num_1, \n conv_input=True, \n activation=tf.nn.relu,\n regularizer=reg_fc,\n dropout_keep_prob=fc_dropout_keep_prob,\n name='fc_1')\n\n # output\n logits = fc_layer(fc_1,\n fc_num_1, num_labels, \n regularizer=reg_fc,\n dropout_keep_prob=fc_dropout_keep_prob,\n name='out')\n\n return logits", "def create_model():\n model = tf.keras.Sequential([\n Conv2D(256, 5, input_shape=[28, 28, 1]),\n BatchNormalization(),\n Activation('relu'),\n \n Conv2D(256, 5),\n BatchNormalization(),\n Activation('relu'),\n \n Conv2D(256, 5),\n MaxPooling2D(2),\n BatchNormalization(),\n Activation('relu'),\n \n Flatten(),\n Dense(256),\n Dropout(.2),\n \n Dense(10),\n Activation('softmax')\n ])\n \n return model", "def create_model_eg1(my_learning_rate):\n # This is a first try to get a simple model that works\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Flatten(input_shape=(8, 8, 15)))\n model.add(tf.keras.layers.Dense(units=32, activation='relu'))\n 
model.add(tf.keras.layers.Dense(units=32, activation='relu'))\n model.add(tf.keras.layers.Dense(units=1))\n\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=my_learning_rate),\n loss=\"mean_squared_error\",\n metrics=['MeanSquaredError'])\n\n return model", "def nn_model():\n seed = 321\n np.random.seed(seed)\n rmsprop = RMSprop(lr=0.0001)\n # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n # kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)\n # for train, test in kfold.split(X, y):\n model_nn = Sequential()\n model_nn.add(Dense(100, input_shape=(117,), activation='relu'))\n model_nn.add(Dropout(0.5))\n model_nn.add(Dense(125, activation='relu', kernel_initializer='normal'))\n model_nn.add(Dropout(0.5))\n model_nn.add(Dense(30, activation='relu', kernel_initializer='normal'))\n model_nn.add(Dropout(0.5))\n model_nn.add(Dense(1, activation='sigmoid'))#softmax\n model_nn.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=rmsprop)\n #model_nn.compile(loss='binary_crossentropy', metrics=['accuracy'], optimizer=rmsprop)\n # Compile model\n model_nn.compile(optimizer=rmsprop, loss='binary_crossentropy', metrics=['accuracy'])\n return model_nn", "def update_model(engine, batch):\n\t\tengine.model.train()\n\t\tengine.model.rpn.nms_thresh = 0.7\n\t\timg, target = prepare_batch(batch, device=get_device(engine.model))\n\t\tengine.optimizer.zero_grad()\n\t\tloss = engine.model(img, target)\n\t\tlosses = sum(l for l in loss.values())\n\t\tlosses.backward()\n\t\tengine.optimizer.step()\n\t\treturn loss", "def create_model(input_shape):\n h, w, c = input_shape\n\n\n # %%%\n inpt = tf.placeholder(tf.float32, (None,h,w,c), 'attributes')\n inpt_flattenned = tf.contrib.layers.flatten(inpt) #tf.reshape(inpt, [-1, h*w*c])\n H1 = tf.layers.dense(inpt_flattenned, units=200, activation=tf.sigmoid)\n H2 = tf.layers.dense(H1, units=20, activation=tf.sigmoid)\n encoding = tf.layers.dense(H2, units=latent_space_size, activation=tf.nn.sigmoid)\n H22 = tf.layers.dense(encoding, units=20, activation=tf.sigmoid)\n H11 = tf.layers.dense(H22, units=200, activation=tf.nn.sigmoid)\n dec_flt = tf.layers.dense(H11, units=h*w*c)\n decode = tf.reshape(dec_flt, [-1, h,w,c])\n# The results which were the most pleasing to the eye were achieved with the following loss function, 2 hidden layers and the latent spaces of tens of dimentions. 
\n# cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=dec_flt, labels=tf.sigmoid(inpt_flattenned)))\n cost = tf.reduce_mean(tf.square(dec_flt-inpt_flattenned))\n # %%%\n\n model = {'cost': cost,\n 'input': input,\n 'enc': encoding,\n 'dec': decode\n }\n return model", "def train_model(self,model):\r\n \r\n train_state = {'stop_early': False,\r\n 'early_stopping_step': 0,\r\n 'early_stopping_best_val': 1e8,\r\n 'learning_rate': self.lr,\r\n 'epoch_index': 0,\r\n 'train_loss': [],\r\n 'val_loss': [],\r\n 'best_model':model}\r\n \r\n dataset = self.dataset\r\n loss_fn = self.loss_fn\r\n \r\n dataset.set_split('train')\r\n print(\"Training module with \"+str(len(dataset))+\" examples\")\r\n \r\n data_loader = DataLoader(dataset,batch_size=self.batch_size,shuffle=True,\r\n drop_last=True)\r\n \r\n optimizer = optim.Adam(model.parameters(), lr=self.lr)\r\n \r\n for epoch in range(self.epochs):\r\n train_state['epoch_index'] = epoch\r\n #First step in each epoch is to train over all batches\r\n model.train()\r\n dataset.set_split('train')\r\n train_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: zero gradients\r\n optimizer.zero_grad()\r\n #Step 2: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 3: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n #Step 4: run backward\r\n loss.backward()\r\n #Step 5: update\r\n optimizer.step()\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n train_loss += new_loss\r\n \r\n train_loss /= b_i\r\n train_state['train_loss'].append(train_loss)\r\n \r\n #After training, compute loss on validation set and check for early stop\r\n model.eval()\r\n dataset.set_split('val')\r\n val_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 2: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n val_loss += new_loss\r\n \r\n val_loss /= b_i\r\n train_state['val_loss'].append(val_loss)\r\n \r\n print(\"Finished epoch \"+str(epoch+1)+\". Train loss=\"+\\\r\n str(train_loss)+\", Val loss=\"+str(val_loss))\r\n \r\n if val_loss < train_state['early_stopping_best_val']:\r\n #new best model, reset stopping counter, store model\r\n train_state['early_stopping_step'] = 0\r\n train_state['early_stopping_best_val'] = val_loss\r\n best_model = copy.deepcopy(model)\r\n best_model.load_state_dict(model.state_dict())\r\n train_state['best_model'] = best_model\r\n else:\r\n #val loss not improved; increase early stopping counter\r\n train_state['early_stopping_step'] += 1\r\n if train_state['early_stopping_step'] >= self.early_stopping_criteria:\r\n train_state['stop_early'] = True\r\n print(\"Val loss failed to improve. 
Stopping early.\")\r\n break\r\n \r\n return train_state['best_model'],train_state", "def _build_model(self):\n\n # Placeholders for our input\n # Our input are MEMORY_LENGTH frames of shape 5, 5 each\n self.X_pl = tf.placeholder(shape=[None, 5, 5, MEMORY_LENGTH], dtype=tf.uint8, name=\"X\")\n # The TD target value\n self.y_pl = tf.placeholder(shape=[None], dtype=tf.float32, name=\"y\")\n # Integer id of which action was selected\n self.actions_pl = tf.placeholder(shape=[None], dtype=tf.int32, name=\"actions\")\n\n X = tf.to_float(self.X_pl)/100\n batch_size = tf.shape(self.X_pl)[0]\n\n # Fully connected layers\n flattened = tf.contrib.layers.flatten(X)\n fc1 = tf.contrib.layers.fully_connected(flattened, 128)\n fc2 = tf.contrib.layers.fully_connected(fc1, 128)\n\n self.predictions = tf.contrib.layers.fully_connected(fc2, len(VALID_ACTIONS), activation_fn=None)\n\n # Get the predictions for the chosen actions only\n gather_indices = tf.range(batch_size) * tf.shape(self.predictions)[1] + self.actions_pl\n self.action_predictions = tf.gather(tf.reshape(self.predictions, [-1]), gather_indices)\n\n # Calculate the loss\n self.losses = tf.squared_difference(self.y_pl, self.action_predictions)\n self.loss = tf.reduce_mean(self.losses)\n\n # Optimizer parameters\n self.optimizer = tf.train.AdamOptimizer()\n self.train_op = self.optimizer.minimize(self.loss, global_step=tf.contrib.framework.get_global_step())", "def _train_model(self):\n raise NotImplementedError()", "def model_compile(self, model, optimiser, loss, metrics, learning_rate = None):\n\n if optimiser == \"sgd\":\n model.compile(optimizer = SGD(lr = learning_rate),\n loss = loss, metrics = metrics)\n elif optimiser == \"rmsprop\":\n model.compile(optimizer = RMSprop(lr = learning_rate),\n loss = loss, metrics = metrics)\n else:\n model.compile(optimizer = optimiser, loss = loss,\n metrics = metrics)", "def _build_model(self):\n if self.weight_function is None:\n self.weight_function = default_weight_function\n\n tf.reset_default_graph()\n\n # Placeholders for the inputs\n self.x0 = tf.placeholder(\n shape=[None, self.num_features],\n dtype=self.dtype,\n name=\"x0\"\n )\n self.x1 = tf.placeholder(\n shape=[None, self.num_features],\n dtype=self.dtype,\n name=\"x1\"\n )\n # Placeholder for the real classes\n self.y0 = tf.placeholder(\n shape=[None, 1],\n dtype=self.dtype,\n name=\"y0\"\n )\n # Placeholder for the weights\n self.w0 = tf.placeholder(\n shape=[None, ],\n dtype=self.dtype,\n name=\"w0\"\n )\n\n # Drop placeholder\n self.should_drop = tf.placeholder(tf.bool, name=\"drop\")\n\n # Regularization\n regularizer = tf.keras.regularizers.l2(self.weight_regularization)\n\n # Input_Dropout\n in0 = tf.layers.dropout(inputs=self.x0,\n rate=self.input_dropout,\n training=self.should_drop\n )\n\n in1 = tf.layers.dropout(inputs=self.x1,\n rate=self.input_dropout,\n training=self.should_drop\n )\n\n # Constructing the feature creation part of the net\n nn0 = tf.layers.dense(\n inputs=in0,\n units=self.hidden_layers[0],\n activation=self.feature_activation,\n use_bias=self.feature_bias,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_hidden_0\"\n )\n\n # By giving nn1 the same name as nn0 and using the flag reuse=True,\n # the weights and biases of all neurons in each branch are identical\n nn1 = tf.layers.dense(\n inputs=in1,\n units=self.hidden_layers[0],\n activation=self.feature_activation,\n use_bias=self.feature_bias,\n kernel_initializer=self.kernel_initializer,\n 
kernel_regularizer=regularizer,\n name=\"nn_hidden_0\",\n reuse=True\n )\n\n # Layer Dropout\n nn0 = tf.layers.dropout(inputs=nn0,\n rate=self.dropout,\n training=self.should_drop\n )\n nn1 = tf.layers.dropout(inputs=nn1,\n rate=self.dropout,\n training=self.should_drop\n )\n\n for i in range(1, len(self.hidden_layers)):\n nn0 = tf.layers.dense(\n inputs=nn0,\n units=self.hidden_layers[i],\n activation=self.feature_activation,\n use_bias=self.feature_bias,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_hidden_\" + str(i)\n )\n nn1 = tf.layers.dense(\n inputs=nn1,\n units=self.hidden_layers[i],\n activation=self.feature_activation,\n use_bias=self.feature_bias,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_hidden_\" + str(i),\n reuse=True\n )\n\n # Layer Dropout\n nn0 = tf.layers.dropout(inputs=nn0,\n rate=self.dropout,\n training=self.should_drop\n )\n nn1 = tf.layers.dropout(inputs=nn1,\n rate=self.dropout,\n training=self.should_drop\n )\n\n # Creating antisymmetric features for the ranking\n self.nn = (nn0 - nn1) / 2.\n\n self.nn = tf.layers.dense(\n inputs=self.nn,\n units=1,\n activation=self.ranking_activation,\n use_bias=False,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_rank\"\n )\n\n self.nn_cls = tf.layers.dense(\n inputs=nn0 / 2.,\n units=1,\n activation=self.ranking_activation,\n use_bias=False,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=regularizer,\n name=\"nn_rank\",\n reuse=True\n )\n\n nn_out = tf.identity(\n input=self.nn,\n name=\"nn\"\n )", "def build_model(nx, layers, activations, lambtha, keep_prob):\n model = K.Sequential()\n reg = K.regularizers.l2\n model.add(K.layers.Dense(layers[0], input_shape=(nx,),\n activation=activations[0],\n kernel_regularizer=reg(lambtha)))\n\n for layer, act in zip(layers[1:], activations[1:]):\n model.add(K.layers.Dropout(1 - keep_prob))\n model.add(K.layers.Dense(layer, activation=act,\n kernel_regularizer=reg(lambtha)))\n\n return model", "def construct_model(self):\n # Set the placeholder for the input episode\n self.inputa = tf.placeholder(tf.float32) # episode train images\n self.inputb = tf.placeholder(tf.float32) # episode test images\n self.labela = tf.placeholder(tf.float32) # episode train labels\n self.labelb = tf.placeholder(tf.float32) # episode test labels\n\n with tf.variable_scope('meta-model', reuse=None) as training_scope:\n # construct the model weights\n self.ss_weights = ss_weights = self.construct_resnet_ss_weights()\n self.weights = weights = self.construct_resnet_weights()\n self.fc_weights = fc_weights = self.construct_fc_weights()\n\n # Load base epoch number from FLAGS\n num_updates = FLAGS.train_base_epoch_num\n\n def task_metalearn(inp, reuse=True):\n \"\"\"The function to process one episode in a meta-batch.\n Args:\n inp: the input episode.\n reuse: whether reuse the variables for the normalization.\n Returns:\n A serious outputs like losses and accuracies.\n \"\"\"\n # Seperate inp to different variables\n inputa, inputb, labela, labelb = inp\n # Generate empty list to record losses\n lossa_list = [] # Base train loss list\n lossb_list = [] # Base test loss list\n\n # Embed the input images to embeddings with ss weights\n emb_outputa = self.forward_resnet(inputa, weights, ss_weights, reuse=reuse) # Embed episode train \n emb_outputb = self.forward_resnet(inputb, weights, ss_weights, reuse=True) # Embed episode test \n\n # Run the first epoch of 
the base learning\n # Forward fc layer for episode train \n outputa = self.forward_fc(emb_outputa, fc_weights)\n # Calculate base train loss\n lossa = self.loss_func(outputa, labela)\n # Record base train loss\n lossa_list.append(lossa)\n # Forward fc layer for episode test\n outputb = self.forward_fc(emb_outputb, fc_weights)\n # Calculate base test loss\n lossb = self.loss_func(outputb, labelb)\n # Record base test loss\n lossb_list.append(lossb) \n # Calculate the gradients for the fc layer \n grads = tf.gradients(lossa, list(fc_weights.values()))\n gradients = dict(zip(fc_weights.keys(), grads))\n # Use graient descent to update the fc layer\n fast_fc_weights = dict(zip(fc_weights.keys(), [fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fc_weights.keys()]))\n \n for j in range(num_updates - 1):\n # Run the following base epochs, these are similar to the first base epoch\n lossa = self.loss_func(self.forward_fc(emb_outputa, fast_fc_weights), labela)\n lossa_list.append(lossa)\n lossb = self.loss_func(self.forward_fc(emb_outputb, fast_fc_weights), labelb)\n lossb_list.append(lossb) \n grads = tf.gradients(lossa, list(fast_fc_weights.values()))\n gradients = dict(zip(fast_fc_weights.keys(), grads))\n fast_fc_weights = dict(zip(fast_fc_weights.keys(), [fast_fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fast_fc_weights.keys()]))\n\n # Calculate final episode test predictions\n outputb = self.forward_fc(emb_outputb, fast_fc_weights)\n # Calculate the final episode test loss, it is the loss for the episode on meta-train \n final_lossb = self.loss_func(outputb, labelb)\n # Calculate the final episode test accuarcy\n accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))\n\n # Reorganize all the outputs to a list\n task_output = [final_lossb, lossb_list, lossa_list, accb]\n\n return task_output\n\n # Initial the batch normalization weights\n if FLAGS.norm is not 'None':\n unused = task_metalearn((self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]), False)\n\n # Set the dtype of the outputs\n out_dtype = [tf.float32, [tf.float32]*num_updates, [tf.float32]*num_updates, tf.float32]\n\n # Run two episodes for a meta batch using parallel setting\n result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, self.labela, self.labelb), \\\n dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)\n # Seperate the outputs to different variables\n lossb, lossesb, lossesa, accsb = result\n\n # Set the variables to output from the tensorflow graph\n self.total_loss = total_loss = tf.reduce_sum(lossb) / tf.to_float(FLAGS.meta_batch_size)\n self.total_accuracy = total_accuracy = tf.reduce_sum(accsb) / tf.to_float(FLAGS.meta_batch_size)\n self.total_lossa = total_lossa = [tf.reduce_sum(lossesa[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]\n self.total_lossb = total_lossb = [tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]\n\n # Set the meta-train optimizer\n optimizer = tf.train.AdamOptimizer(self.meta_lr)\n self.metatrain_op = optimizer.minimize(total_loss, var_list=list(ss_weights.values()) + list(fc_weights.values()))\n\n # Set the tensorboard \n self.training_summaries = []\n self.training_summaries.append(tf.summary.scalar('Meta Train Loss', (total_loss / tf.to_float(FLAGS.metatrain_epite_sample_num))))\n self.training_summaries.append(tf.summary.scalar('Meta Train Accuracy', total_accuracy))\n for j in range(num_updates):\n 
self.training_summaries.append(tf.summary.scalar('Base Train Loss Step' + str(j+1), total_lossa[j]))\n for j in range(num_updates):\n self.training_summaries.append(tf.summary.scalar('Base Val Loss Step' + str(j+1), total_lossb[j]))\n\n self.training_summ_op = tf.summary.merge(self.training_summaries)\n\n self.input_val_loss = tf.placeholder(tf.float32)\n self.input_val_acc = tf.placeholder(tf.float32)\n self.val_summaries = []\n self.val_summaries.append(tf.summary.scalar('Meta Val Loss', self.input_val_loss))\n self.val_summaries.append(tf.summary.scalar('Meta Val Accuracy', self.input_val_acc))\n self.val_summ_op = tf.summary.merge(self.val_summaries)", "def construct_model():\n # model = Sequential()\n # model.add(Dense(units=64, activation='relu', input_dim=100))\n # model.add(Dense(units=10, activation='softmax'))\n # model.compile(loss='categorical_crossentropy',\n # optimizer='sgd',\n # metrics=['accuracy'])\n # return model\n\n model = Sequential()\n # Input Layer\n model.add(Conv2D(64, 3, data_format='channels_last', activation='relu', padding='same',\n input_shape=(img_width, img_height, 3)))\n model.add(MaxPool2D(pool_size=2, strides=2))\n # Hidden Layer 1\n model.add(Conv2D(64, 3, activation='relu', padding='same'))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 2\n model.add(Conv2D(128, 3, activation='relu', padding='same'))\n model.add(Conv2D(128, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 3\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n\n # Fully Connected Layer\n model.add(Flatten())\n # 512 Neuron Layer\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.5))\n # Output Layer\n model.add(Dense(num_of_classes))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model", "def _define_model_functions(self):\n # Input of neurons (Batch size x Number of states)\n states = Input(shape=(self.num_states,), dtype=tf.float32, name=\"states\")\n\n # Hidden layers\n layer_1 = layers.Dense(self.hidden_arch[0], activation=self.activation)(states)\n layers_n = [None for _ in range(len(self.hidden_arch))]\n layers_n[0] = layer_1\n for idx, n_neurons in enumerate(self.hidden_arch[1:]):\n layers_n[idx + 1] = layers.Dense(\n n_neurons,\n activation=self.activation,\n )(layers_n[idx])\n\n # Output of neurons is q(s, a) function\n q_s_a = layers.Dense(self.num_actions, name=\"q_s_a\")(layers_n[-1])\n\n # Get the model\n self.model = Model(inputs=states, outputs=q_s_a)\n\n # Loss function and optimizer\n self.loss = losses.MeanSquaredError(reduction=\"auto\", name=\"mean_squared_error\")\n\n self.optimizer = optimizers.Adam(\n learning_rate=self.learning_rate,\n beta_1=self.beta1,\n beta_2=self.beta2,\n name=\"Adam\",\n )", "def FeatLinModel(VGG, layername='features_20', type=\"weight\", weight=None, chan=0, pos=(10, 10)):\n layers_all = get_model_layers(VGG)\n if 'features' in layername:\n layeridx = layers_all.index(layername) - 1 + 1 # -1 for the \"features\" layer\n VGGfeat = VGG.features[:layeridx]\n else:\n VGGfeat = VGG\n hooks, feat_dict = hook_model(VGG, layerrequest=(layername,))\n layernames = list(feat_dict.keys())\n print(layernames)\n if type == \"weight\":\n def weight_objective(img, 
scaler=True):\n VGGfeat.forward(img.cuda())\n feat = hooks(layername)\n if scaler:\n return -(feat * weight.unsqueeze(0)).mean()\n else:\n batch = img.shape[0]\n return -(feat * weight.unsqueeze(0)).view(batch, -1).mean(axis=1)\n\n return weight_objective\n elif type == \"neuron\":\n def neuron_objective(img, scaler=True):\n VGGfeat.forward(img.cuda())\n feat = hooks(layername)\n if len(feat.shape) == 4:\n if scaler:\n return -(feat[:, chan, pos[0], pos[1]]).mean()\n else:\n batch = img.shape[0]\n return -(feat[:, chan, pos[0], pos[1]]).view(batch, -1).mean(axis=1)\n elif len(feat.shape) == 2:\n if scaler:\n return -(feat[:, chan]).mean()\n else:\n batch = img.shape[0]\n return -(feat[:, chan]).view(batch, -1).mean(axis=1)\n return neuron_objective", "def mini_model(self):\n with tf.variable_scope(name_or_scope='human2d_network'):\n # down-sampling\n resi_0 = res_layer(self._input, filters=16, strides=2, kernel_size=7, training=self.training, name='resi_0')\n resi_1 = res_layer(resi_0, filters=32, strides=1, kernel_size=3, training=self.training, name='resi_1')\n pool_0 = max_pool_layer(resi_1, name='pool_0')\n resi_2 = res_layer(pool_0, filters=32, strides=1, kernel_size=3, training=self.training, name='resi_2')\n # hourglass module\n resi_3 = res_layer(resi_2, filters=64, strides=1, kernel_size=3, training=self.training, name='resi_3')\n hrgs_0 = hourglass_layer(resi_3, training=True, name='hrgs_0')\n # keypoint output\n keypoint_pre_0 = res_layer(hrgs_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='keypoint_pre_0')\n keypoint_pre_1 = res_layer(keypoint_pre_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='keypoint_pre_1')\n keypoint_pre_2 = res_layer(keypoint_pre_1, filters=64, strides=1, kernel_size=3, training=self.training,\n name='keypoint_pre_2')\n keypoint_output_raw = res_layer(keypoint_pre_2, filters=14, strides=1, kernel_size=1,\n training=self.training, bottleneck=False, name='keypoint_output_raw')\n keypoint_output = tf.nn.sigmoid(x=keypoint_output_raw, name='keypoint_output')\n # silhouette output\n silhouette_pre_0 = res_layer(hrgs_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='silhouette_pre_0')\n silhouette_pre_1 = res_layer(silhouette_pre_0, filters=64, strides=1, kernel_size=3, training=self.training,\n name='silhouette_pre_1')\n silhouette_pre_2 = res_layer(silhouette_pre_1, filters=64, strides=1, kernel_size=3, training=self.training,\n name='silhouette_pre_2')\n silhouette_output_raw = res_layer(silhouette_pre_2, filters=2, strides=1, kernel_size=1,\n training=self.training, bottleneck=False, name='silhouette_output_raw')\n silhouette_output = tf.nn.softmax(logits=silhouette_output_raw, name='silhouette_output')\n # return\n return None, None, keypoint_output, silhouette_output", "def train_model(model, train_input, train_target, validation_input, validation_target, nb_epochs, mini_batch_size, learning_rate, momentum = 0, sched_ = None, opt = 'SGD', loss = 'MSE'):\n if opt == 'SGD' :\n optimizer = SGD(model.param(), learning_rate, momentum)\n elif opt == 'Adadelta':\n optimizer = Adadelta(model.param(), learning_rate)\n sched = Scheduler(learning_rate)\n \n if loss == 'MSE' :\n criterion = MSELoss()\n elif loss == 'CrossEntropy' :\n criterion = CrossEntropy()\n \n losses = []\n train_errors = []\n validation_errors = []\n\n for epoch in range(nb_epochs):\n acc_loss = 0\n nb_train_errors = 0\n indices = torch.randperm(train_input.size(0))\n \n for b in range(0, train_input.size(0), 
mini_batch_size):\n # indices for batch\n indices_subset = indices[b:b+mini_batch_size]\n # subsets for batch\n train_input_subset = train_input.index_select(0, indices_subset)\n train_target_subset = train_target.index_select(0, indices_subset)\n \n optimizer.zero_grad() \n output = model.forward(train_input_subset)\n \n for k in range(mini_batch_size):\n if torch.max(train_target.data[indices[b+k]], 0)[1] != torch.max(output[k], 0)[1]:\n nb_train_errors += 1\n \n loss = criterion.forward(output, train_target_subset)\n acc_loss += loss\n \n output_grad = criterion.backward()\n model.backward(output_grad)\n optimizer.step()\n if sched_ == 'step_decay' :\n sched.step_decay(epoch, learning_rate, 0.5, nb_epochs/4)\n if sched_ == 'clr' :\n sched.cyclical_lr(nb_epochs, learning_rate/4, learning_rate, epoch)\n elif sched_ == None :\n pass\n \n losses.append(acc_loss)\n train_errors.append((100 * nb_train_errors) / train_input.size(0))\n \n nb_validation_errors, _ = compute_nb_errors(model, validation_input, validation_target, mini_batch_size)\n validation_errors.append((100 * nb_validation_errors) / validation_input.size(0))\n \n if epoch%10 == 0: print('Epoch {:d} Train loss {:.02f} Train error {:.02f}% Validation error {:.02f}%'.format(epoch, acc_loss, (100 * nb_train_errors) / train_input.size(0), (100 * nb_validation_errors) / validation_input.size(0)))\n \n return losses, train_errors, validation_errors", "def MLP_model(self):\n print(\"Building model..\")\n self.model = Sequential()\n\n # first hidden layer (0)\n self.model.add(Dense(self.h_nodes0, input_dim=self.input_size, use_bias=True))\n self.model.add(Activation(self.activation0))\n self.model.add(Dropout(self.dropout0))\n\n # second hidden layer (1)\n if self.h_nodes1 != None:\n self.model.add(Dense(self.h_nodes1, use_bias=True))\n self.model.add(Activation(self.activation1))\n self.model.add(Dropout(self.dropout1))\n\n # third hidden layer (2)\n if self.h_nodes2 != None:\n self.model.add(Dense(self.h_nodes2, use_bias=True))\n self.model.add(Activation(self.activation2))\n self.model.add(Dropout(self.dropout2))\n\n #output layer\n self.model.add(Dense(self.output_size))\n self.model.add(Activation(self.activation_out))\n\n #compile model\n self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[R_squared])\n\n return self.model", "def tune_model(self):\n model = model_from_json(open(self.config.model_name).read())\n model.compile(loss='mse', optimizer='adam')\n model.load_weights(self.config.save_path)\n history = self.model.fit_generator(generator=self.train, \n samples_per_epoch=self.train_len, nb_epoch=self.config.epochs,)\n self.model.save_weights(self.config.save_path)\n return history", "def model_fn(mode, inputs, columns, config, reuse=False):\r\n\r\n if mode == 'train':\r\n mode = tf.estimator.ModeKeys.TRAIN\r\n \r\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\r\n \r\n labels = inputs['labels']\r\n ##labels = tf.cast(labels, tf.int64)\r\n\r\n # -----------------------------------------------------------\r\n # MODEL: define the forward ops\r\n with tf.variable_scope('linear_part', reuse=reuse):\r\n linear_logits = build_linear_model(inputs, columns['liner_feature'], config)\r\n\r\n with tf.variable_scope('dnn_part', reuse=reuse):\r\n dnn_logits = build_dnn_model(mode, inputs, columns['deep_feature'], config)\r\n \r\n logits = linear_logits + dnn_logits\r\n predictions = tf.nn.sigmoid(logits)\r\n #print(labels)\r\n # loss\r\n loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, 
logits=logits)\r\n\r\n # train_ops\r\n if is_training:\r\n linear_model_config = config['linear_model']\r\n dnn_model_config = config['dnn_model']\r\n\r\n global_step = tf.train.get_or_create_global_step()\r\n linear_optimizer = tf.train.FtrlOptimizer(\r\n learning_rate=float(linear_model_config['learning_rate']),\r\n l1_regularization_strength=float(linear_model_config['l1_reg']),\r\n l2_regularization_strength=float(linear_model_config['l2_reg']))\r\n\r\n dnn_optimizer = tf.train.AdamOptimizer(\r\n learning_rate=float(dnn_model_config['learning_rate']),\r\n beta1=float(dnn_model_config.get('beta1', 0.9)),\r\n beta2=float(dnn_model_config.get('beta2', 0.999)),\r\n epsilon=float(dnn_model_config.get('epsilon', 1e-8)))\r\n\r\n train_ops = []\r\n\r\n train_ops.append(\r\n linear_optimizer.minimize(\r\n loss,\r\n global_step=global_step,\r\n var_list=tf.get_collection(\r\n tf.GraphKeys.TRAINABLE_VARIABLES,\r\n scope='linear_part')))\r\n\r\n train_ops.append(\r\n dnn_optimizer.minimize(\r\n loss,\r\n global_step=global_step,\r\n var_list=tf.get_collection(\r\n tf.GraphKeys.TRAINABLE_VARIABLES,\r\n scope='dnn_part')))\r\n\r\n train_op = tf.group(*train_ops)\r\n\r\n # -----------------------------------------------------------\r\n # METRICS AND SUMMARIES\r\n # Metrics for evaluation using tf.metrics (average over whole dataset)\r\n with tf.variable_scope(\"metrics\"):\r\n metrics = {\r\n 'loss': tf.metrics.mean(loss),\r\n 'auc': tf.metrics.auc(labels=labels, predictions=predictions, \r\n num_thresholds=200, summation_method='trapezoidal')\r\n }\r\n\r\n # Group the update ops for the tf.metrics\r\n update_metrics_op = tf.group(*[op for _, op in metrics.values()])\r\n\r\n # Get the op to reset the local variables used in tf.metrics\r\n metric_variables = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope=\"metrics\")\r\n metrics_init_op = tf.variables_initializer(metric_variables)\r\n\r\n # -----------------------------------------------------------\r\n # MODEL SPECIFICATION\r\n # Create the model specification and return it\r\n # It contains nodes or operations in the graph that will be used for training and evaluation\r\n model_spec = inputs\r\n model_spec['variable_init_op'] = [tf.global_variables_initializer(),\r\n tf.tables_initializer()]\r\n model_spec[\"predictions\"] = predictions\r\n model_spec['loss'] = loss\r\n model_spec['metrics_init_op'] = metrics_init_op\r\n model_spec['metrics'] = metrics\r\n model_spec['update_metrics'] = update_metrics_op\r\n\r\n if is_training:\r\n model_spec['train_op'] = train_op\r\n\r\n return model_spec", "def NN(train_df, val_df, test_df, sub_path):\n logging.info('Neural Network preprocessing')\n \n if train_df is not None: \n y_train = train_df['is_attributed'].values\n train_df = train_df.drop('is_attributed', axis = 1)\n train_df = train_df.drop('attributed_time', axis = 1) \n #train_df = train_df.drop('click_time', axis = 1) #only if no preprocessing\n gc.collect()\n if val_df is not None:\n y_val = val_df['is_attributed'].values \n val_df = val_df.drop(['is_attributed'], axis = 1)\n val_df = get_keras_data(val_df)\n \n list_variables = get_values(train_df)\n print(list_variables)\n \n logging.info('Model is creating...') \n \n max_var = []\n if test_df is not None:\n for i, var in enumerate(list_variables):\n max_var.append(np.max([train_df[var].max(), test_df[var].max()])+1) \n train_df = get_keras_data(train_df)\n else:\n for i, var in enumerate(list_variables):\n max_var.append(train_df[var].max()+1) \n train_df = get_keras_data(train_df)\n \n 
emb_n = 50\n dense_n = 1000\n \n in_var = []\n emb_var = [] \n for i, var in enumerate(list_variables):\n in_var.append(Input(shape=[1], name = var))\n emb_var.append(Embedding(max_var[i], emb_n)(in_var[i]))\n \n fe = concatenate([emb for emb in emb_var])\n s_dout = SpatialDropout1D(0.2)(fe)\n fl1 = Flatten()(s_dout)\n #conv = Conv1D(100, kernel_size=4, strides=1, padding='same')(s_dout)\n dl = Dense(100)(s_dout)\n fl2 = Flatten()(dl)\n concat = concatenate([(fl1), (fl2)])\n x = Dropout(0.2)(Dense(dense_n,activation='relu')(concat))\n x = Dropout(0.2)(Dense(dense_n,activation='relu')(x))\n outp = Dense(1,activation='sigmoid')(x)\n \n model = Model(inputs=[var for var in in_var], outputs=outp)\n \n logging.info('Model is compiling...')\n \n batch_size = 50000\n epochs = 2 #12 for sample_train\n exp_decay = lambda init, fin, steps: (init/fin)**(1/(steps-1)) - 1\n steps = int(len(list(train_df)[0]) / batch_size) * epochs\n lr_init, lr_fin = 0.002, 0.0002\n lr_decay = exp_decay(lr_init, lr_fin, steps)\n optimizer_adam = Adam(lr=lr_init, decay=lr_decay)\n \n model.compile(loss='binary_crossentropy',optimizer=optimizer_adam,metrics=['accuracy'])\n model.summary()\n \n logging.info('Model is training...')\n \n model.fit(train_df, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, verbose=2, validation_split=0.1)\n del train_df, y_train; gc.collect()\n \n if val_df is not None:\n logging.info('Prediction on validation set')\n predictions_NN_prob = model.predict(val_df, batch_size=batch_size, verbose=2)\n del val_df; gc.collect()\n predictions_NN_prob = predictions_NN_prob[:,0]\n \n predictions_NN = np.where(predictions_NN_prob > 0.5, 1, 0)\n acc_NN = accuracy_score(y_val, predictions_NN)\n print('Overall accuracy of Neural Network model:', acc_NN)\n \n if test_df is not None:\n logging.info('Prediction on test set')\n sub = pd.DataFrame()\n sub['click_id'] = test_df['click_id'].astype('int')\n test_df = test_df.drop(['click_id'], axis=1)\n test_df = get_keras_data(test_df)\n \n sub['is_attributed'] = model.predict(test_df, batch_size=batch_size, verbose=2)\n del test_df; gc.collect()\n logging.info(\"Writing....\")\n with file_io.FileIO(sub_path, mode='wb') as fout:\n sub.to_csv(fout,index=False)\n logging.info(\"Done...\")\n logging.info(sub.info())", "def get_model():\r\n model = Sequential([\r\n\r\n Lambda(normalize, input_shape=(66, 200, 3)),\r\n\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(1, (3, 3), padding='same', activation='relu', strides=2),\r\n Flatten(),\r\n\r\n\r\n ])\r\n\r\n model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])\r\n return model", "def __init__(\n self, \n dim_feat_raw, \n dim_feat_smooth, \n 
dim_label_raw, \n dim_label_smooth, \n arch_gnn, \n aug_feat,\n num_ensemble, \n train_params\n ):\n super().__init__()\n self.mulhead = 1\n self.num_layers = arch_gnn[\"num_layers\"]\n self.dropout, self.dropedge = train_params[\"dropout\"], train_params['dropedge']\n self.mulhead = int(arch_gnn[\"heads\"]) # only useful for GAT\n\n self.branch_sharing = arch_gnn['branch_sharing'] # only for ensemble\n\n self.type_feature_augment = aug_feat\n assert dim_feat_raw <= dim_feat_smooth, \"smoothened feature cannot have smaller shape than the original one\"\n # NOTE: dim_label_raw may be larger than dim_label_smooth ==> label is not used as input\n self.num_classes = dim_label_raw\n self.dim_label_in = dim_label_smooth\n self.dim_feat_in = dim_feat_smooth\n self.dim_hidden = arch_gnn['dim']\n # build the model below\n dim, act = arch_gnn['dim'], arch_gnn['act']\n self.aug_layers, self.conv_layers, self.res_pool_layers = [], [], []\n for i in range(num_ensemble):\n # feat aug\n if len(self.type_feature_augment) > 0:\n self.aug_layers.append(nn.ModuleList(\n nn.Linear(_dim, self.dim_feat_in) for _, _dim in self.type_feature_augment\n ))\n # graph convs\n convs = []\n if i == 0 or not self.branch_sharing:\n for j in range(arch_gnn['num_layers']):\n cls_gconv = DeepGNN.NAME2CLS[arch_gnn['aggr']]\n dim_in = (self.dim_feat_in + self.dim_label_in) if j == 0 else dim\n convs.append(cls_gconv(dim_in, dim, dropout=self.dropout, act=act, mulhead=self.mulhead))\n self.conv_layers.append(nn.Sequential(*convs))\n else: # i > 0 and branch_sharing\n self.conv_layers.append(self.conv_layers[-1])\n # skip-pooling layer\n type_res = arch_gnn['residue'].lower()\n type_pool = arch_gnn['pooling'].split('-')[0].lower()\n cls_res_pool = layers.ResPool\n args_pool = {}\n if type_pool == 'sort':\n args_pool['k'] = int(arch_gnn['pooling'].split('-')[1])\n self.res_pool_layers.append(\n cls_res_pool(dim, dim, arch_gnn['num_layers'], type_res, type_pool,\n dropout=self.dropout, act=act, args_pool=args_pool\n ))\n if len(self.aug_layers) > 0:\n self.aug_layers = nn.ModuleList(self.aug_layers)\n self.conv_layers = nn.ModuleList(self.conv_layers)\n self.res_pool_layers = nn.ModuleList(self.res_pool_layers)\n # ------- ensembler + classifier -------\n if num_ensemble == 1:\n self.ensembler = layers.EnsembleDummy()\n else:\n self.ensembler = layers.EnsembleAggregator(dim, dim, num_ensemble, dropout=self.dropout, \n type_dropout=train_params[\"ensemble_dropout\"], act=arch_gnn[\"ensemble_act\"])\n self.classifier = DeepGNN.NAME2CLS['mlp'](dim, self.num_classes, act='I', dropout=0.)\n # ---- optimizer, etc. 
----\n self.lr = train_params[\"lr\"]\n self.sigmoid_loss = arch_gnn[\"loss\"] == \"sigmoid\"\n self.loss, self.opt_op = 0, None\n self.optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n\n self.num_ensemble = num_ensemble", "def build_model(input_shape, X_train, arch=\"VGG16\", loss=\"sparse_categorical_crossentropy\", learning_rate=[0.0005, 0.0001, 0.00002]):\n # select model architecture\n if arch == \"VGG16\":\n model = models.VGG16(input_shape, num_layers=num_labels)\n elif arch = \"VGG16_twist\":\n model = models.VGG16_twst(input_shape, num_layers=num_labels)\n elif arch = \"VGG11\":\n model = VGG11(input_shape, X_train, num_layers=num_labels)\n\n # learning rate constant decay\n learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(\n BOUNDARIES, learning_rate)\n\n model.summary()\n # compile model\n optimiser = tf.optimizers.Adam(learning_rate=learning_rate_fn)\n model.compile(optimizer=optimiser,\n # loss=loss,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics=[\"accuracy\"])\n return model", "def create_model(X, y, it=1, no_of_filters=32, kern_size=3,\n max_p_size=3, drop_perc_conv=0.3, drop_perc_dense=0.2,\n dens_size=128, val_split_perc=0.1, no_of_epochs=30,\n optimizer=\"adam\", random_search=False, batch_size=64):\n\n y_train_cat = to_categorical(y)\n\n model = Sequential()\n\n model.add(Conv2D(no_of_filters,\n kernel_size=(kern_size, kern_size),\n activation='relu',\n input_shape=(56, 56, 1),\n padding='same'))\n\n model.add(Conv2D(no_of_filters,\n kernel_size=(kern_size, kern_size),\n activation='relu',\n padding='same'))\n model.add(MaxPooling2D((max_p_size, max_p_size)))\n model.add(Dropout(drop_perc_conv))\n\n model.add(Conv2D(no_of_filters,\n kernel_size=(kern_size, kern_size),\n activation='relu',\n padding='same'))\n model.add(Conv2D(no_of_filters,\n kernel_size=(kern_size, kern_size),\n activation='relu',\n padding='same'))\n model.add(MaxPooling2D((max_p_size, max_p_size)))\n model.add(Dropout(drop_perc_conv))\n\n model.add(Flatten())\n\n model.add(Dense(dens_size, activation='relu'))\n model.add(Dropout(drop_perc_dense))\n\n model.add(Dense(36, activation='softmax'))\n\n model.compile(optimizer=optimizer,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n early_stopping_monitor = EarlyStopping(patience=5)\n rlrop = ReduceLROnPlateau(monitor='val_acc', factor=0.5,\n patience=3, verbose=1, min_lr=0.00001)\n\n history = model.fit(X,\n y_train_cat,\n validation_split=val_split_perc,\n epochs=no_of_epochs,\n callbacks=[early_stopping_monitor, rlrop],\n batch_size=batch_size)\n\n history_dict = history.history\n\n if random_search:\n\n np.save(r\"./models/random_search/hist/history_dict_{}.npy\".format(it),\n history_dict)\n model.save(r\"./models/random_search/models/CNN_{}.h5\".format(it))\n\n else:\n\n np.save(r\"./logs/history_dict_{}.npy\".format(it), history_dict)\n model.save(r\"./models/CNN_FF_{}.h5\".format(it))\n\n return history_dict", "def medium_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.3, nvars=8):\n model = Sequential()\n model.add(Dropout(dropout, input_shape=(nvars,)))\n model.add(Dense(11, input_dim=nvars, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(1)))\n model.add(Dropout(dropout))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(1)))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def 
compile(self, learning_rate, momentum):\n # Optimizer object\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\"]\n if self.config.MODEL == 'mrcnn':\n loss_names.append(\"mrcnn_mask_loss\")\n \n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)", "def model_build(self):\n\n # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!\n X_input = Input(self.inputData[0].shape)\n\n '''\n # CONV -> BN -> RELU Block applied to X\n X = Conv2D(8, (8, 8), name='conv0')(X_input)\n X = BatchNormalization(name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool0')(X)\n X = Dropout(0.1, name='dropout0')(X)\n\n X = Conv2D(16, (16, 16), name='conv1')(X)\n X = BatchNormalization(name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool1')(X)\n X = Dropout(0.1, name='dropout1')(X)\n\n X = Conv2D(16, (32, 32), name='conv2')(X)\n X = BatchNormalization(name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool2')(X)\n X = Dropout(0.1, name='dropout2')(X)\n' '''\n\n X = Dense(500, activation='relu', name='fc0')(X_input)\n X = Dropout(0.1, name='dropout1')(X)\n X = Dense(500, activation='relu', name='fc1')(X)\n X = Dropout(0.1, name='dropout2')(X)\n X = Dense(3, activation='softmax', name='fc2')(X)\n\n # Create model. 
This creates your Keras model instance, you'll use this instance to train/test the model.\n self.model = Model(inputs=X_input, outputs=X, name='acouModel')", "def build_model(self):\n self.model = models.Sequential()\n self.model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))\n self.model.add(layers.MaxPooling2D((2, 2)))\n self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n self.model.add(layers.MaxPooling2D((2, 2)))\n self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n self.model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss='mse', metrics=['mae'])\n self.model.add(layers.Flatten())\n self.model.add(layers.Dense(64, activation='relu'))\n self.model.add(layers.Dense(10, activation='softmax'))\n self.model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])", "def build_model(train_inputs,train_labels,model_params,model_mode='classification',\n model_type='naive_bayes'):\n if model_mode == \"classification\":\n if model_type == \"naive_bayes\":\n model = GaussianNB()\n if model_type == \"knn\":\n model = KNeighborsClassifier(n_neighbors=50)\n if model_type == \"svm\":\n model = SVC(kernel='poly', degree =27, coef0 =1, C=5)\n if model_type == \"decision_tree\":\n model = DecisionTreeClassifier(min_samples_split=45,min_samples_leaf=45,criterion=\"gini\")\n #model = RandomForestClassifier(n_estimators=500, n_jobs=-1)\n\n if model_mode == \"regression\":\n if model_type == \"knn\":\n model = KNeighborsRegressor()\n if model_type == \"svm\":\n model = SVR()\n if model_type == \"decision_tree\":\n model = DecisionTreeRegressor()\n\n\n model.fit(train_inputs, train_labels)\n # for name, score in zip(train_inputs.columns,model.feature_importances_):\n # print(name, score)\n\n return model", "def model_loss(input_real, input_z, out_channel_dim):\r\n # TODO: Implement Function\r\n print(\"<<<<<<<<<<<input_z.shape\", input_z.shape)\r\n g_model = generator(input_z, out_channel_dim, is_train=True)\r\n\r\n print(\"<<<<<<<<<<<input_real.shape\", input_real.shape)\r\n d_model_real, d_logits_real = discriminator(input_real, reuse=False)\r\n\r\n print(\"<<<<<<<<<<<g_model.shape\", g_model.shape)\r\n d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)\r\n print(\"<<<<<<<<<<<\", d_model_fake.shape, d_logits_fake.shape)\r\n\r\n ## add smooth here\r\n\r\n smooth = 0.1\r\n d_loss_real = tf.reduce_mean(\r\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,\r\n labels=tf.ones_like(d_model_real) * (1 - smooth)))\r\n\r\n d_loss_fake = tf.reduce_mean(\r\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))\r\n\r\n g_loss = tf.reduce_mean(\r\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,\r\n labels=tf.ones_like(d_model_fake)))\r\n\r\n d_loss = d_loss_real + d_loss_fake\r\n\r\n return d_loss, g_loss, g_model", "def eval_model(args):\n cfg, lbl = util.get_label_cfg_by_args(args)\n uid = cfg['uniqueid']\n print('We are playing with %s' % uid)\n outdir='models/%s/gate_expert' % uid\n outname='gate_expert_model.pt'\n if KLLOSS:\n outname = 'gate_expert_kldiv_model.pt'\n if args.warm:\n outname = outname.replace('.pt', '_warm.pt')\n mdl_path = os.path.join(outdir, outname)\n gate_expert = GateExpertNet(mdl_path, args.argmax)\n eval_fun = gate_expert.get_y\n\n data = npload(cfg['file_path'], uid)\n datax = data[cfg['x_name']]\n datay = data[cfg['y_name']]\n evaly = eval_fun(datax)\n print(np.histogram(evaly[:, 48]))\n fig, ax = pld.get3dAxis()\n 
ax.scatter(datax[:, 0], datax[:, 1], evaly[:, 48])\n loss = l1loss(evaly, datay)\n err_norm = np.mean(loss, axis=1)\n fig, ax = plt.subplots()\n ax.hist(err_norm)\n plt.show()", "def create_model():\n model = Sequential()\n\n model.add(Dense(18, input_dim=9, kernel_initializer='normal', activation='relu'))\n model.add(Dropout(0.1))\n model.add(Dense(9, kernel_initializer='normal', activation='relu'))\n model.add(Dropout(0.1))\n model.add(Dense(1, kernel_initializer='normal'))\n\n learning_rate = 0.001\n momentum = 0.8\n sgd = SGD(lr=learning_rate, momentum=momentum, nesterov=False)\n model.compile(loss='mean_squared_error', optimizer=sgd)\n model.summary()\n return model", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def train_model(model, data, optimizer, loss_type, beta1=None, beta2=None):\n model.train()\n optimizer.zero_grad()\n logits, reg_info = model(data)\n if loss_type == 'sigmoid':\n loss = torch.nn.BCEWithLogitsLoss(reduction='mean')(logits[data.train_mask], data.y[data.train_mask])\n elif loss_type == 'softmax':\n loss = torch.nn.CrossEntropyLoss(reduction='mean')(logits[data.train_mask], data.y[data.train_mask])\n else:\n raise\n # Add IB loss:\n if beta1 is not None and beta1 != 0:\n ixz = torch.stack(reg_info[\"ixz_list\"], 1).mean(0).sum()\n if model.struct_dropout_mode[0] == 'DNsampling' or (model.struct_dropout_mode[0] == 'standard' and len(model.struct_dropout_mode) == 3):\n ixz = ixz + torch.stack(reg_info[\"ixz_DN_list\"], 1).mean(0).sum()\n loss = loss + ixz * beta1\n if beta2 is not None and beta2 != 0:\n structure_kl_loss = torch.stack(reg_info[\"structure_kl_list\"]).mean()\n if model.struct_dropout_mode[0] == 'DNsampling' or (model.struct_dropout_mode[0] == 'standard' and len(model.struct_dropout_mode) == 3):\n structure_kl_loss = structure_kl_loss + torch.stack(reg_info[\"structure_kl_DN_list\"]).mean()\n loss = loss + structure_kl_loss * beta2\n loss.backward()\n optimizer.step()", "def _hg_model_fn(features, labels, mode, params):\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n weight_decay = params.weight_decay\n momentum = params.momentum\n decay_factor = params.decay_factor\n decay_step = params.decay_step\n init_learning_rate = params.init_learning_rate\n num_stacks = params.num_stacks\n num_joints = params.num_joints\n\n tower_features = features\n if mode == tf.estimator.ModeKeys.PREDICT:\n if num_gpus < 1:\n tower_labels = [None]\n else:\n tower_labels = [None for i in range(num_gpus)]\n else:\n tower_labels = labels\n\n tower_losses = []\n tower_gradvars = []\n tower_preds = []\n\n # channels first (NCHW) is normally optimal on GPU and channels last (NHWC)\n # on CPU. 
The exception is Intel MKL on CPU which is optimal with\n # channels_last.\n data_format = params.data_format\n if not data_format:\n if num_gpus == 0:\n data_format = 'channels_last'\n else:\n data_format = 'channels_first'\n\n if num_gpus == 0:\n num_devices = 1\n device_type = 'cpu'\n else:\n num_devices = num_gpus\n device_type = 'gpu'\n\n for i in range(num_devices):\n worker_device = '/{}:{}'.format(device_type, i)\n if variable_strategy == 'CPU':\n device_setter = utils.local_device_setter(\n worker_device=worker_device)\n elif variable_strategy == 'GPU':\n device_setter = utils.local_device_setter(\n ps_device_type='gpu',\n worker_device=worker_device,\n ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(\n num_gpus, tf.contrib.training.byte_size_load_fn))\n if mode == tf.estimator.ModeKeys.TRAIN:\n batch_size = params.train_batch_size / num_devices\n else:\n batch_size = params.eval_batch_size / num_devices\n\n with tf.variable_scope('hg', reuse=bool(i != 0)):\n with tf.name_scope('tower_%d' % i) as name_scope:\n with tf.device(device_setter):\n loss, gradvars, preds = _tower_fn(\n mode, weight_decay, tower_features[i][0], tower_labels[i],\n data_format, params.batch_norm_decay,\n params.batch_norm_epsilon, params.num_stacks, params.num_out, params.n_low, params.num_joints, batch_size,params.seq_length)\n tower_losses.append(loss)\n tower_gradvars.append(gradvars)\n tower_preds.append(preds)\n if i == 0:\n # Only trigger batch_norm moving mean and variance update from\n # the 1st tower. Ideally, we should grab the updates from all\n # towers but these stats accumulate extremely fast so we can\n # ignore the other stats from the other towers without\n # significant detriment.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,\n name_scope)\n\n if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:\n\n # Now compute global loss and gradients.\n gradvars = []\n with tf.name_scope('gradient_averaging'):\n all_grads = {}\n for grad, var in itertools.chain(*tower_gradvars):\n if grad is not None:\n all_grads.setdefault(var, []).append(grad)\n for var, grads in six.iteritems(all_grads):\n # Average gradients on the same device as the variables\n # to which they apply.\n with tf.device(var.device):\n if len(grads) == 1:\n avg_grad = grads[0]\n else:\n avg_grad = tf.multiply(tf.add_n(grads), 1. 
/ len(grads))\n gradvars.append((avg_grad, var))\n\n # Device that runs the ops to apply global gradient updates.\n consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'\n with tf.device(consolidation_device):\n\n learning_rate = tf.train.exponential_decay(init_learning_rate, tf.train.get_global_step(), decay_step, decay_factor, staircase=True, name= 'learning_rate')\n\n loss = tf.reduce_mean(tower_losses, name='loss')\n\n examples_sec_hook = utils.ExamplesPerSecondHook(\n params.train_batch_size, every_n_steps=10)\n\n tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n train_hooks = [logging_hook, examples_sec_hook]\n\n optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)\n\n if params.sync:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer, replicas_to_aggregate=num_workers)\n sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)\n train_hooks.append(sync_replicas_hook)\n\n # Create single grouped train op\n train_op = [\n optimizer.apply_gradients(\n gradvars, global_step=tf.train.get_global_step())\n ]\n \n train_op.extend(update_ops)\n train_op = tf.group(*train_op)\n\n predictions = {\n 'heatmaps':\n tf.concat([p['heatmaps'] for p in tower_preds], axis=0),\n 'images':\n tf.concat([i for i in tower_features], axis=0)\n }\n if mode==tf.estimator.ModeKeys.EVAL:\n hm = predictions['heatmaps']\n stacked_labels = tf.concat(labels[0][0][0], axis=0)\n \n gt_labels = tf.transpose(stacked_labels,[1,0,3,4,2])\n\n joint_accur = []\n for j in range(params.seq_length):\n for i in range(params.num_joints):\n joint_accur.append(_pck_hm(hm[j,:,-1, :, :,i], gt_labels[j,:, :, :, i], params.eval_batch_size/num_devices))\n accuracy = tf.stack(joint_accur)\n metrics = {'Mean Pixel Error': tf.metrics.mean(accuracy)}\n tf.logging.info('Accuracy op computed')\n else:\n metrics = None\n \n else:\n train_op = None\n loss = None\n train_hooks = None\n metrics = None\n predictions = {\n 'heatmaps':\n tf.concat([p['heatmaps'] for p in tower_preds], axis=0),\n 'images':\n tf.concat([i for i in tower_features], axis=0)\n }\n \n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n training_hooks=train_hooks,\n eval_metric_ops=metrics)", "def BuildModel(ANNSetup,model):\n\n if(isinstance(ANNSetup.Activ,str)):\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), activation=ANNSetup.Activ, kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation=ANNSetup.Activ))\n else:\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit)))\n 
model.add(LeakyReLU(alpha=ANNSetup.Activ))\n\n return model", "def sub_model_net(self):\r\n # define input\r\n x = keras.Input(shape=(960,), name='input')\r\n fc_2 = keras.layers.Dense(160, name='fc_2')(x)\r\n add_1 = keras.layers.Activation('relu')(fc_2)\r\n drop = keras.layers.Dropout(0.5)\r\n # output\r\n y_hat = keras.layers.Dense(1283, activation='softmax', name='output')(add_1)\r\n model = keras.Model(inputs=x, outputs=y_hat)\r\n\r\n return model", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n self.visual_names = ['real_A', 'fake_B', 'real_B']\n # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>\n if self.isTrain:\n self.model_names = ['G', 'D']\n else: # during test time, only load G\n self.model_names = ['G']\n\n # Set TPN_enabled to true if opt.TPN is defined\n if opt.TPN:\n self.TPN_enabled = True\n else:\n self.TPN_enabled = False\n\n # Conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc\n discr_input_nc = opt.input_nc + opt.output_nc\n\n # If TPN is enabled, switch to the U-Net with TPN architecture\n if self.TPN_enabled:\n opt.netG = 'unet_256_TPN'\n discr_input_nc +=1 # Additional Channel for Time Input\n\n # define networks (both generator and discriminator)\n self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # define a discriminator; \n self.netD = networks.define_D(discr_input_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.TPN_enabled:\n self.loss_names = ['G_GAN', 'G_L1', 'G_TPN', 'D_real', 'D_fake']\n\n # Store final gamma value and then set it to 0\n self.final_gamma = deepcopy(opt.gamma)\n opt.gamma = 0\n\n # Initiliaze m and c to None\n self.update_m = None\n self.update_c = None\n\n # Setup TPN if set to True\n print(\"\\nSetting up TPN\\n\")\n opt_TPN = deepcopy(opt) # copy train options and change later\n opt_TPN.model = 'time_predictor'\n opt_TPN.name = opt.TPN\n opt_TPN.netD = 'time_input'\n opt_TPN.ndf = 16 # Change depending on the ndf size used with the TPN model specified\n # hard-code some parameters for TPN test phase\n opt_TPN.display_id = -1 # no visdom display;\n opt_TPN.isTrain = False\n print(\"Options TPN: {}\\n\\n\".format(opt_TPN))\n self.TPN = create_model(opt_TPN) # create a model given opt_TPN.model and other options\n self.TPN.setup(opt_TPN) # regular setup: load\n\n if self.isTrain:\n # define loss functions\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionL1 = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n # Check if lambda_L2 is in range [0,1]\n assert (0 <= self.opt.lambda_L2 <= 1)", "def __compile_model(self,\n 
network,\n loss=lasagne.objectives.categorical_crossentropy,\n learning_rate=0.001,\n momentum=0.1):\n print('Compiling model...')\n self.report['network'] = inspect.getsource(network)\n self.report['loss_function'] = loss.__name__\n self.report['learning_rate'] = learning_rate\n self.report['learning_momentum'] = momentum\n start_time = time.time()\n self.__input_var = T.tensor4('inputs')\n self.__target_var = T.ivector('targets')\n self.__network = network(self.__input_var)\n self.__loss = lambda t: loss(get_output(self.__network,\n deterministic=t),\n self.__target_var).mean()\n self.__optimizer = lasagne.updates.nesterov_momentum(\n self.__loss(False), # enable dropout during training\n get_all_params(self.__network, trainable=True),\n learning_rate=learning_rate,\n momentum=momentum)\n predictions = T.argmax(\n get_output(self.__network, deterministic=True),\n axis=1)\n # number of correct predictions\n n_correct = T.sum(T.eq(predictions, self.__target_var))\n # number of relevant images in the sample\n n_relevant = T.sum(self.__target_var)\n # number of images predicted to be relevant\n n_selected = T.sum(predictions)\n # number of correct predictions of relevance\n n_correct_relevant = T.sum(predictions & self.__target_var)\n statistics = [n_correct, n_selected, n_relevant, n_correct_relevant]\n self.__train_fn = theano.function(\n [self.__input_var, self.__target_var],\n [self.__loss(False)] + statistics,\n updates=self.__optimizer)\n self.__val_fn = theano.function(\n [self.__input_var, self.__target_var],\n [self.__loss(True)] + statistics)\n elapsed_time = time.time() - start_time\n self.report['time_to_compile'] = elapsed_time", "def model_fn_builder(model_config,\n train_params):\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = getattr(models, model_config.model_name)(config=model_config,\n is_training=is_training)\n _ = model(input_ids, input_mask=input_mask, token_type_ids=segment_ids)\n\n # TODO (@zhaoshenjian.01): check conditional_jit_scope\n # split loss calculation across batch\n batch_splits = train_params.get(\"batch_splits\", 1)\n if batch_splits == 1:\n # sparse_softmax_cross_entropy_with_logits\n masked_lm_output_dict = get_masked_lm_output(model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights)\n else:\n # use for large vocab\n masked_lm_output_dict = get_masked_lm_output_split_batch(\n model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n batch_splits=batch_splits)\n\n masked_lm_loss = masked_lm_output_dict[\"loss\"]\n\n use_nsp = train_params.get(\"use_nsp\", True)\n if use_nsp:\n next_sentence_labels = features[\"next_sentence_labels\"]\n next_sentence_output_dict = get_next_sentence_output(\n model_config, model.get_pooled_output(), next_sentence_labels)\n next_sentence_loss = next_sentence_output_dict[\"loss\"]\n else:\n next_sentence_loss = 0\n\n total_loss = 
masked_lm_loss + next_sentence_loss\n\n tvars = tf.compat.v1.trainable_variables()\n # run init\n init_checkpoint = train_params.get(\"init_checkpoint\")\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map,\n initialized_variable_names) = get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint,\n assignment_map)\n return tf.train.Scaffold()\n scaffold_fn = tpu_scaffold\n logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n logging.info(\" name = {}, shape = {} {}\".format(var.name, var.shape,\n init_string))\n\n # default `bert_decay` lr_scheduler\n lr_params = train_params.get(\n 'lr_scheduler', {\n 'name': 'bert_decay',\n 'learning_rate': 1e-4,\n 'warmup_steps': 10000,\n 'num_train_steps': 1000000\n })\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op, _ = optimizers.create_optimizer(\n loss=total_loss,\n init_lr=lr_params['learning_rate'],\n num_train_steps=lr_params['num_train_steps'],\n num_warmup_steps=lr_params['warmup_steps'])\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n return output_spec\n raise NotImplementedError\n\n return model_fn", "def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model.summary()\n return model", "def _build_model(self, name, hidden_layers, nodes):\n with tf.variable_scope(name):\n self.inputs_ = tf.placeholder(tf.float32, [None, self.state_size], name='inputs')\n self.actions_ = tf.placeholder(tf.int32, [None], name='actions')\n one_hot_actions = tf.one_hot(self.actions_, self.action_size)\n self.targetQs_ = tf.placeholder(tf.float32, [None], name='target')\n self.layers = list()\n self.layers.append(fully_connected(\"hidden1\", self.inputs_, nodes))\n for layer in range(hidden_layers):\n self.layers.append(fully_connected(f\"hidden{layer+2}\", self.layers[layer], nodes))\n self.output = fully_connected(\"output\", self.layers[-1], self.action_size, activation=None)\n self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1)\n self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q))\n self.opt = 
tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)", "def make_model():\n \n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu',\n input_shape=(150, 150, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu'))\n model.add(layers.Dense(37, activation='softmax'))\n \n #model.add(layers.Dense(1, activation='sigmoid'))\n \n\n model.compile(loss='sparse_categorical_crossentropy',\n optimizer=optimizers.RMSprop(lr=1e-4),\n metrics=['acc'])\n return model", "def model(self, img, label, bias, filters):\n prediction, z, flat, layers = self.predict(bias, img, filters)\n\n loss = self.categorical_crossentropy(prediction, label)\n\n # backpropagation\n dout = prediction - np.asarray(label).reshape((15, 1))\n dflat, dw8, db8, dw7, db7 = self.dense_layer_backprop(dout, flat, filters[6:8], bias[6:8], z)\n\n dconv6 = dflat.reshape(layers[-1].shape)\n dconv6[layers[-1] <= 0] = 0\n dconv5, df6, db6 = self.conv_layer_backprop(dconv6, layers[-2], filters[5])\n dconv5[layers[-2] <= 0] = 0\n dpool2, df5, db5 = self.conv_layer_backprop(dconv5, layers[-3], filters[4])\n dconv4 = self.pooling_layer_backprop(dpool2, layers[-4])\n dconv4[layers[-4] <= 0] = 0\n dconv3, df4, db4 = self.conv_layer_backprop(dconv4, layers[-5], filters[3])\n dconv3[layers[-5] <= 0] = 0\n dpool1, df3, db3 = self.conv_layer_backprop(dconv3, layers[-6], filters[2])\n dconv2 = self.pooling_layer_backprop(dpool1, layers[-7])\n dconv2[layers[-7] <= 0] = 0\n dconv1, df2, db2 = self.conv_layer_backprop(dconv2, layers[-8], filters[1])\n dconv1[layers[-8] <= 0] = 0\n dimg, df1, db1 = self.conv_layer_backprop(dconv1, img[0], filters[0])\n\n weight_gradients = [df1, df2, df3, df4, df5, df6, dw7, dw8]\n bias_gradients = [db1, db2, db3, db4, db5, db6, db7, db8]\n\n return weight_gradients, bias_gradients, loss", "def loss_fn(self, targets, outputs, model):", "def explain(self):\n # build the 2 versions of the model\n model = self.build_model()\n last_conv_model = self.build_cut_model()\n\n for i, label_name in enumerate(self.label_names):\n # This is the algorithm for the last convolution layer's tensor image\n # Get the index of the image that was classified correctly with the most confidence for the class\n predicted_col_proba = np.array(self.predicted_labels)[0][:, i]\n predicted_col_argsort = predicted_col_proba.argsort()[::-1]\n predicted_col = (predicted_col_proba > 0.2).astype(int)\n true_col = self.true_labels[:, 0]\n\n representative_image_index = None\n for most_probable_arg_index in predicted_col_argsort:\n if predicted_col[most_probable_arg_index] == true_col[most_probable_arg_index]:\n representative_image_index = most_probable_arg_index\n break\n\n # Resize the image to fit the neural network and keep the original resized image\n original_img = io.imread('{}/{}/{}'.format(path_to_img_directory, self.ex_format, np.array(self.image_names)[representative_image_index]))\n original_img = cv2.normalize(original_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n original_img = cv2.resize(original_img, dsize=(self.ex_input_size, self.ex_input_size), interpolation=cv2.INTER_CUBIC)\n img = np.expand_dims(original_img, 
axis=0)\n original_img = original_img[:, :, :3]\n\n # Get the output of the neural network for this image as a tensor\n model.predict(np.array(img))\n class_output = model.output[:, i]\n last_conv_layer = model.get_layer(self.ex_last_conv_layer_name1).output\n # if self.model_name == 'vit':\n # last_conv_layer = tf.nn.relu(tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024)))\n\n # Get the output for the cut model\n cut_img = last_conv_model.predict(np.array(img))[0]\n if self.model_name == 'vit':\n cut_img = np.reshape(cut_img[:256, :], (16, 16, 1024))\n cut_img = np.mean(cut_img, axis=-1)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n if self.model_name == 'vit':\n cut_img[0, 0] = np.mean(cut_img)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n cut_img = cv2.resize(cut_img, (self.ex_input_size, self.ex_input_size))\n\n # This is the algorithm of the Grad-CAM model\n # Refine the output of the last convolutional layer according to the class output\n grads = K.gradients(class_output, last_conv_layer)[0]\n if self.model_name == 'vit':\n last_conv_layer = tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024))\n last_conv_layer = last_conv_layer / tf.norm(last_conv_layer)\n\n grads = tf.reshape(grads[:, :256, :], (-1, 16, 16, 1024))\n grads = grads / tf.norm(grads)\n\n pooled_grads = K.mean(grads, axis=(0, 1, 2))\n iterate = K.function([model.input], [pooled_grads, last_conv_layer[0]])\n pooled_grads_value, conv_layer_output_value = iterate([img])\n for j in range(self.ex_last_conv_layer_filter_number):\n conv_layer_output_value[:, :, j] *= pooled_grads_value[j]\n\n # Create a 16x16 heatmap and scale it to the same size as the original image\n heatmap = np.mean(conv_layer_output_value, axis=-1)\n heatmap = np.maximum(heatmap, 0)\n heatmap /= np.max(heatmap)\n heatmap = cv2.resize(heatmap, (self.ex_input_size, self.ex_input_size))\n heatmap = np.uint8(255 * heatmap)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n heatmap = cv2.normalize(heatmap, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n superimposed_img = cv2.addWeighted(original_img, 0.7, heatmap, 0.4, 0)\n\n # save the original image\n plt.matshow(original_img)\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'original', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the cut image\n plt.matshow(cut_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'cut', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the superimposed gradcam image\n plt.matshow(superimposed_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'gradcam', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)", "def make_NN(n_hidden, n_epoch, labelsdict, lr, device, model_name, trainloader, validloader, train_data, pretrain, finetune_whole, custom_model):\n if custom_model == 2:\n # Use custom two-layer convolution model\n print(\"Using Two-Layer CNN\")\n model = TwoLayerConvNet()\n elif custom_model == 5:\n 
print(\"Using Five-Layer CNN\")\n # Use custom five-layer convolution model\n model = FiveLayerConvNet()\n else:\n # Import NN model (either pretrained or not)\n model = getattr(models, model_name)(pretrained=pretrain)\n \"\"\" ===================================================================================== \"\"\"\"\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER FREEZE THE PARAMETERS OR NOT (WILBERT ARISTO) \"\"\"\n # If we do not need to finetune whole model, freeze parameters that we don't need to re-train\n if not finetune_whole:\n for param in model.parameters():\n param.requires_grad = False\n \"\"\" ===================================================================================== \"\"\"\"\n\n n_out = len(labelsdict)\n\n \"\"\" CHANGED LAST LAYER TO model.fc IF WE ARE USING RESNET MODEL (WILBERT ARISTO) \"\"\"\n if \"resnet\" in model_name:\n # Make classifier\n n_in = next(model.fc.modules()).in_features\n model.fc = NN_Classifier(input_size=n_in, output_size=n_out, hidden_layers=n_hidden)\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER OPTIMIZE ALL PARAMETERS OR JUST THE LAST LAYER'S PARAMS (WILBERT ARISTO) \"\"\"\n # Define optimizer\n if finetune_whole:\n optimizer = optim.Adam(model.parameters(), lr = lr)\n else:\n optimizer = optim.Adam(model.fc.parameters(), lr = lr)\n \"\"\" ============================================================================================================================ \"\"\"\"\n else:\n # Make classifier\n n_in = next(model.classifier.modules()).in_features\n model.classifier = NN_Classifier(input_size=n_in, output_size=n_out, hidden_layers=n_hidden)\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER OPTIMIZE ALL PARAMETERS OR JUST THE LAST LAYER'S PARAMS (WILBERT ARISTO) \"\"\"\n # Define optimizer\n if finetune_whole:\n optimizer = optim.Adam(model.parameters(), lr = lr)\n else:\n optimizer = optim.Adam(model.classifier.parameters(), lr = lr)\n \"\"\" ============================================================================================================================ \"\"\"\"\n \"\"\" ============================================================================================================================ \"\"\"\"\n\n # Define criterion\n criterion = nn.NLLLoss() \n\n model.to(device)\n start = time.time()\n\n epochs = n_epoch\n steps = 0 \n running_loss = 0\n print_every = 40\n for e in range(epochs):\n model.train()\n for images, labels in trainloader:\n images, labels = images.to(device), labels.to(device)\n\n steps += 1\n\n optimizer.zero_grad()\n\n output = model.forward(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if steps % print_every == 0:\n # Eval mode for predictions\n model.eval()\n\n # Turn off gradients for validation\n with torch.no_grad():\n test_loss, accuracy = validation(model, validloader, criterion, device)\n\n print(\"Epoch: {}/{} - \".format(e+1, epochs),\n \"Training Loss: {:.3f} - \".format(running_loss/print_every),\n \"Validation Loss: {:.3f} - \".format(test_loss/len(validloader)),\n \"Validation Accuracy: {:.3f}\".format(accuracy/len(validloader)))\n\n running_loss = 0\n\n # Make sure training is back on\n model.train()\n \n \"\"\" CHANGED LAST LAYER TO model.fc IF WE ARE USING RESNET MODEL (WILBERT ARISTO) \"\"\"\n if \"resnet\" in model_name:\n # Add model info \n model.fc.n_in = n_in\n model.fc.n_hidden = n_hidden\n model.fc.n_out = n_out\n model.fc.labelsdict = labelsdict\n 
model.fc.lr = lr\n model.fc.optimizer_state_dict = optimizer.state_dict\n model.fc.model_name = model_name\n model.fc.class_to_idx = train_data.class_to_idx\n else:\n # Add model info \n model.classifier.n_in = n_in\n model.classifier.n_hidden = n_hidden\n model.classifier.n_out = n_out\n model.classifier.labelsdict = labelsdict\n model.classifier.lr = lr\n model.classifier.optimizer_state_dict = optimizer.state_dict\n model.classifier.model_name = model_name\n model.classifier.class_to_idx = train_data.class_to_idx\n \"\"\" ============================================================================================================================ \"\"\"\"\n\n print('model:', model_name, '- hidden layers:', n_hidden, '- epochs:', n_epoch, '- lr:', lr)\n print(f\"Run time: {(time.time() - start)/60:.3f} min\")\n return model\n\n# Define function to save checkpoint\ndef save_checkpoint(model, path):\n checkpoint = {'c_input': model.classifier.n_in,\n 'c_hidden': model.classifier.n_hidden,\n 'c_out': model.classifier.n_out,\n 'labelsdict': model.classifier.labelsdict,\n 'c_lr': model.classifier.lr,\n 'state_dict': model.state_dict(),\n 'c_state_dict': model.classifier.state_dict(),\n 'opti_state_dict': model.classifier.optimizer_state_dict,\n 'model_name': model.classifier.model_name,\n 'class_to_idx': model.classifier.class_to_idx\n }\n torch.save(checkpoint, path)\n \n# Define function to load model\ndef load_model(path):\n cp = torch.load(path)\n \n # Import pre-trained NN model \n model = getattr(models, cp['model_name'])(pretrained=True)\n \n # Freeze parameters that we don't need to re-train \n for param in model.parameters():\n param.requires_grad = False\n \n # Make classifier\n model.classifier = NN_Classifier(input_size=cp['c_input'], output_size=cp['c_out'], \\\n hidden_layers=cp['c_hidden'])\n \n # Add model info \n model.classifier.n_in = cp['c_input']\n model.classifier.n_hidden = cp['c_hidden']\n model.classifier.n_out = cp['c_out']\n model.classifier.labelsdict = cp['labelsdict']\n model.classifier.lr = cp['c_lr']\n model.classifier.optimizer_state_dict = cp['opti_state_dict']\n model.classifier.model_name = cp['model_name']\n model.classifier.class_to_idx = cp['class_to_idx']\n model.load_state_dict(cp['state_dict'])\n \n return model", "def modelo(hidden_layers=[1], activation='tanh',features=1, \r\n beta_1=0.9, beta_2=0.999,lr=0.001, decay=1e-6, dropout=0):\r\n \r\n input_layer = layers.Input(shape=(features,))\r\n vmiddle = layers.Dense(hidden_layers[0], \r\n kernel_initializer='random_uniform')(input_layer)\r\n vmiddle = layers.Activation(activation)(vmiddle)\r\n vmiddle = layers.Dropout(dropout)(vmiddle)\r\n \r\n if len(hidden_layers) != 1:\r\n\r\n for item in range(1,len(hidden_layers)):\r\n vmiddle = layers.Dense(hidden_layers[item], \r\n kernel_initializer='random_uniform')(vmiddle)\r\n vmiddle = layers.Activation(activation)(vmiddle)\r\n vmiddle = layers.Dropout(dropout)(vmiddle)\r\n vmiddle =layers.Dense(1, kernel_initializer='random_uniform')(vmiddle)\r\n vexit =layers.Activation('sigmoid')(vmiddle)\r\n \r\n else:\r\n vmiddle =layers.Dense(1, kernel_initializer='random_uniform')(vmiddle)\r\n vexit =layers.Activation('sigmoid')(vmiddle)\r\n\r\n model = models.Model(inputs=input_layer, outputs=vexit)\r\n model.compile(loss='binary_crossentropy', \r\n optimizer=optimizer.Adam(beta_1=beta_1, beta_2=beta_2, lr=lr, decay=decay,), \r\n metrics=['accuracy'])\r\n \r\n return model", "def baseline_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.2):\n model = 
keras.models.Sequential()\n model.add(Dropout(dropout, input_shape=(12,)))\n model.add(Dense(12, input_dim=12, kernel_initializer=init, activation='relu'))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def build_model():", "def build_model(self):\n \n # initalizing generators\n self.g12 = G12(conv_dim=self.numGenFilter, domainA_channels = self.domainA_channels, domainB_channels = self.domainB_channels)\n self.g21 = G21(conv_dim=self.numGenFilter, domainA_channels = self.domainA_channels, domainB_channels = self.domainB_channels)\n \n # initializing discriminators\n self.d1 = D1(conv_dim=self.numDiscFilter, domainA_channels = self.domainA_channels, use_labels=self.use_labels)\n self.d2 = D2(conv_dim=self.numDiscFilter, domainB_channels = self.domainB_channels, use_labels=self.use_labels)\n \n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n \n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n \n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()", "def CNN_RegModel(input_shape=(1,72,72),output_dim=6,lr=0.0008,model_path='model.h5'):\n model=Sequential()\n model.add(Convolution2D(filters=16,nb_row=5,nb_col=5, padding='same',data_format='channels_first',input_shape=input_shape))\n\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',data_format='channels_first'))\n\n model.add(Convolution2D(filters=32,nb_row=5,nb_col=5, padding='same',data_format='channels_first'))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',data_format='channels_first'))\n \n model.add(Convolution2D(filters=64,nb_row=5,nb_col=5, padding='same',data_format='channels_first'))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',data_format='channels_first'))\n \n model.add(Convolution2D(filters=128,nb_row=5,nb_col=5, padding='same',data_format='channels_first'))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',data_format='channels_first'))\n \n model.add(Flatten())\n model.add(Dense(5184))\n model.add(Activation('relu'))\n\n model.add(Dense(500))\n model.add(Activation('relu'))\n\n model.add(Dense(100))\n model.add(Activation('relu'))\n model.add(Dense(output_dim))\n model.add(Activation('tanh'))\n\n adam=Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=False)\n model.compile(loss='mse',optimizer=adam,metrics=['acc']) \n # define the checkpoint\n filepath = model_path\n checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')\n callbacks_list = [checkpoint]\n # print(model.summary())\n return model, callbacks_list", "def _build_model(self):\n # Define input layer (states)\n states = layers.Input(shape=(self.state_size,), name='states')\n\n # Add hidden layers\n net = layers.Dense(units=self.hidden_size, activation='relu')(states)\n net = layers.Dropout(self.dropout_rate)(net)\n net = layers.Dense(units=self.hidden_size * 2, activation='relu')(net)\n net = layers.Dropout(self.dropout_rate)(net)\n net = layers.Dense(units=self.hidden_size, activation='relu')(net)\n net = 
layers.Dropout(self.dropout_rate)(net)\n\n # Add final output layer with sigmoid activation\n raw_actions = layers.Dense(\n units=self.action_size,\n activation='sigmoid',\n name='raw_actions'\n )(net)\n\n # Scale [0, 1] output for each action dimension to proper range\n actions = layers.Lambda(\n lambda x: (x * self.action_range) + self.action_low,\n name='actions'\n )(raw_actions)\n\n # Create Keras model\n self.model = models.Model(inputs=states, outputs=actions)\n\n # Define loss function using action value (Q value) gradients\n action_gradients = layers.Input(shape=(self.action_size,))\n loss = K.mean(-action_gradients * actions)\n\n # Define optimizer and training function\n optimizer = optimizers.Adam()\n updates_op = optimizer.get_updates(\n params=self.model.trainable_weights,\n loss=loss,\n )\n self.train = K.function(\n inputs=[self.model.input, action_gradients, K.learning_phase()],\n outputs=[],\n updates=updates_op\n )", "def easy_drive():\n model = Sequential()\n model.add(Dense(10, activation=\"relu\",input_dim=2))\n model.add(Dense(10, activation=\"relu\"))\n #model.add(Dropout(0.9))\n model.add(Dense(2))\n model.compile(optimizer=\"adam\", loss=\"mse\", metrics=['accuracy'])\n return model", "def model_train(self, u0, t, data):\n \n # Set the number of threads for this program to one\n torch.set_num_threads(1)\n \n # Define the closure function that consists of resetting the\n # gradient buffer, loss function calculation, and backpropagation\n # The closure function is necessary for LBFGS optimizer, because\n # it requires multiple function evaluations\n # The closure function returns the loss value\n def closure():\n \n # Set the model to training mode\n self.model.train()\n \n # Reset the gradient buffer (set to 0)\n self.optimizer.zero_grad()\n \n # Calculate the model prediction (full field solution)\n ode_pred = odeint(self.model, u0.to(self.params.device),\n t.to(self.params.device), rtol=1e-5, atol=1e-6)\n \n # Extract the breakthrough curve from the full field solution prediction\n cauchy_mult = self.model.flux_modules[0].cauchy_mult * self.model.flux_modules[0].D_eff\n pred = ((ode_pred[:,0,-2] - ode_pred[:,0,-1]) * cauchy_mult).squeeze()\n \n # Calculate the loss function using the sum squared error metric\n loss = self.params.error_mult * torch.sum((data.to(self.params.device)\n - pred)**2)\n \n # Extract the predicted retardation factor function for physical\n # regularization\n u = torch.linspace(0.0, 2.0, 100).view(-1,1).to(self.params.device)\n ret_temp = self.model.flux_modules[0].coeff_nn(u)\n \n # Physical regularization: value of the retardation factor should\n # decrease with increasing concentration\n loss += self.params.phys_mult * torch.sum(\n torch.relu(ret_temp[:-1] - ret_temp[1:]))\n \n # Backpropagate to obtain gradient of model parameters\n loss.backward()\n \n return loss\n \n # Plot the predicted retardation factor as a function of dissolved\n # concentration and update at each training epoch\n fig, ax = plt.subplots()\n u = torch.linspace(0.01, 2.00, 100).view(-1,1).to(self.params.device)\n ret_pred = 1 / self.model.flux_modules[0].coeff_nn(u) / 10**self.model.flux_modules[0].p_exp\n ax_pred, = ax.plot(u.cpu(), ret_pred.cpu().detach())\n plt.title('Predicted Retardation Factor',fontsize=16)\n plt.xlabel(r'$c_{diss}$ [mg/L]',fontsize=16)\n plt.ylabel(r'$R$',fontsize=16)\n plt.tight_layout()\n \n # Iterate until maximum epoch number is reached\n for epoch in range(self.start_epoch, self.params.epochs):\n \n # Start timer\n a = time.time()\n \n 
# Update the model parameters and record the loss value\n self.optimizer.step(closure)\n loss = closure()\n self.train_loss.append(loss.item())\n \n # If the training loss is lower than the best loss value,\n # update the best loss and save the model\n if self.train_loss[-1] < self.best_loss:\n self.best_loss = self.train_loss[-1]\n if self.params.save_model:\n thread = Thread(target=self.save_model_to_file(\n epoch))\n thread.start()\n \n # Write the loss values to the tensorboard log file\n self.tb.add_scalar('training_loss', self.train_loss[-1], epoch)\n \n # Stop the timer\n b = time.time()\n \n # Print out the epoch status\n print('Training: Epoch [%d/%d], Training Loss: %.4f, Runtime: %.4f secs'\n %(epoch + 1, self.params.epochs, self.train_loss[-1], b - a))\n \n # Update the retardation factor plot\n ret_pred = 1 / self.model.flux_modules[0].coeff_nn(u) / 10**self.model.flux_modules[0].p_exp\n ax_pred.set_ydata(ret_pred.cpu().detach())\n ax.relim()\n ax.autoscale_view()\n plt.draw()\n plt.pause(0.0001)\n \n # Load model from the latest saved checkpoint (i.e. with the lowest\n # training error)\n if self.params.save_model:\n self.checkpoint = torch.load(self.model_save_file)\n self.model.load_state_dict(self.checkpoint['state_dict'])\n self.model.to(self.params.device)\n \n # Plot the retardation factor and save if required\n ret_pred = 1 / self.model.flux_modules[0].coeff_nn(u) / 10**self.model.flux_modules[0].p_exp\n ax_pred.set_ydata(ret_pred.cpu().detach())\n ax.relim()\n ax.autoscale_view()\n plt.draw()\n plt.pause(0.0001)\n if self.params.save_model:\n plt.savefig(self.params.model_path + \"\\\\\" + self.params.model_name + \"_retardation.png\")", "def compile(self, optimizer, lr):\n \n #clip_morm = 0.1\n self.loss_f = None\n with self.graph.as_default():\n \n tvars = tf.trainable_variables()\n ft_vars = [v for v in tvars if \"_fe\" in v.name] \n lab_vars = [v for v in tvars if \"_dc\" not in v.name]\n dom_vars = [v for v in tvars if \"_lp\" not in v.name]\n\n print()\n print(\" ft updates:\", ft_vars)\n print(\"96x3 updates:\", lab_vars)\n print(\" 1x3 updates:\", dom_vars)\n print()\n\n # `tf.nn.softmax_cross_entropy_with_logits` is deprcated.\n # https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits\n self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.labels, logits=self.output, name='cross_entropy')\n self.loss_adv = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.labels_adv, logits=self.output_adv, name='cross_entropy_adv')\n \n #grads_and_vars = optimizer.compute_gradients(loss, var_list=tf_vars)\n #clipped_grads_and_vars = [(tf.clip_by_norm(grad, clip_norm=clip_norm), var) for grad, var in grads_and_vars]\n \n self.loss_fe = - lam * self.loss_adv\n # `tf.control_dependencies` is necessary if `tf.layers.batch_normalization` is in the model\n # https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization\n self.train_step_adv = optimizer(lr).minimize(self.loss_fe, name='minimize_fe', var_list=ft_vars)\n self.train_step = optimizer(lr).minimize(self.loss, name='minimize', var_list=lab_vars)\n self.train_step_adv = optimizer(lr).minimize(self.loss_adv, name='minimize_adv', var_list=dom_vars)\n\n # Initialize all `tf.Variable`.\n self.session.run(tf.global_variables_initializer())" ]
[ "0.70582205", "0.69713557", "0.68715394", "0.68682396", "0.675125", "0.66826314", "0.6662389", "0.6640771", "0.6632258", "0.6623443", "0.662218", "0.6608138", "0.66036206", "0.6591534", "0.65909654", "0.65843874", "0.65824467", "0.6580098", "0.657169", "0.65641236", "0.65623116", "0.6558", "0.6547517", "0.6539792", "0.65321106", "0.6530054", "0.6495189", "0.6481626", "0.6478066", "0.6471766", "0.6459619", "0.64584386", "0.6436821", "0.64322376", "0.6421599", "0.6418774", "0.64132035", "0.6411085", "0.64090955", "0.64080745", "0.63971955", "0.6393785", "0.63934946", "0.6393376", "0.6392462", "0.63918316", "0.6387408", "0.6386674", "0.6384001", "0.63825923", "0.6379927", "0.6377451", "0.6376778", "0.6367329", "0.63641065", "0.63587254", "0.6354467", "0.63536286", "0.6350865", "0.6343527", "0.6327274", "0.6325107", "0.63214654", "0.6317765", "0.6314963", "0.6310212", "0.6308891", "0.6303916", "0.63021123", "0.63020223", "0.63002384", "0.62987465", "0.62965035", "0.6295052", "0.62943566", "0.6291126", "0.6291072", "0.6286454", "0.6283993", "0.62817085", "0.62799466", "0.6274219", "0.6274149", "0.6273921", "0.6273511", "0.626759", "0.62664735", "0.62651974", "0.6262023", "0.62570053", "0.62560785", "0.6252251", "0.62496054", "0.6248514", "0.62475175", "0.6242332", "0.62336355", "0.6228689", "0.6227644", "0.6224238", "0.6223442" ]
0.0
-1
Extract embedding from internal Keras model
def extract_embedding(self, from_model): return from_model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gensim_to_keras(model):\n return model.wv.get_keras_embedding()", "def get_embedding(self, model):\n embedding = []\n for node in range(len(self.graph.nodes())):\n embedding.append(list(model[str(node)]))\n embedding = np.array(embedding)\n return embedding", "def gensim_to_keras(model):\n layer = model.wv.get_keras_embedding()\n return (layer)", "def get_embeddings() -> tuple:\n # Initialize the model loading Universal Sentense Encoder\n # into a KerasLayer from Kaggle dataset file\n model = tf.keras.Sequential(\n [KerasLayer(encoder_path, input_shape=[], dtype=tf.string,\n output_shape=[512], trainable=False),\n # tf.keras.layers.Layer(512, dtype=tf.float16) # To reduce memory footprint\n ]\n )\n\n train_emb = model.predict(data_train['text'])\n print('Train texts converted into embeddings. Shape:', train_emb.shape)\n\n test_emb = model.predict(data_test['text'])\n print('Test texts converted into embeddings. Shape:', test_emb.shape)\n\n return train_emb, test_emb", "def concept_embedding(concept_model: ConceptDetectionModel2D):\n return concept_model.to_embedding()", "def embed_word(self):\n return self.emb.get_keras_embedding(dropout = self.emb_dropout,\n trainable = self.trainable_emb,\n input_length = self.sent_maxlen)", "def embed_word(self):\n return self.emb.get_keras_embedding(trainable = self.trainable_emb,\n input_length = self.sent_maxlen)", "def pretrained_embedding_layer(model,model2,model3, word_to_index,emb_dim_max):\n words_ignored = []\n vocab_len = len(word_to_index) + 1 \n emb_matrix = np.zeros([vocab_len,emb_dim_max])\n \n print(' Total words would be processed : '+str(vocab_len))\n for word, idx in word_to_index.items():\n if word in model:\n emb_matrix[idx,:200] = model[word]\n emb_matrix[idx,200:] = 0\n if word in model2:\n emb_matrix[idx, :100] = model2[word]\n emb_matrix[idx, 100:] = 0\n if word in model3.keys():\n emb_matrix[idx,:] = model3[word]\n else:\n words_ignored.append(word)\n print(str(len(words_ignored))+\" words ignored\")\n print(emb_matrix.shape) \n \n \n embedding_layer = Embedding(vocab_len,emb_dim_max,trainable = True)\n \n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer,words_ignored", "def model_extract_document_embedding(self):\n input_ids = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"ids\")\n attention_mask = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"att\")\n token = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"tok\")\n\n # Embedding :\n if self.method_embedding == 'CamemBERT':\n Camembert_model = transformers.TFCamembertModel.from_pretrained(\"jplu/tf-camembert-base\")\n x = Camembert_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'FlauBERT':\n # lr = 0.00001\n Flaubert_model = transformers.TFFlaubertModel.from_pretrained(\"jplu/tf-flaubert-base-uncased\")\n x = Flaubert_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'XLM-RoBERTa':\n # lr = 0.00001\n XLMRoBERTa_model = transformers.TFXLMRobertaModel.from_pretrained(\"jplu/tf-xlm-roberta-base\")\n x = XLMRoBERTa_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'RoBERTa':\n # Experience Test path weights :\n PATH = '/kaggle/input/tf-roberta/'\n config = transformers.RobertaConfig.from_pretrained(PATH + 'config-roberta-base.json')\n Roberta_model = transformers.TFRobertaModel.from_pretrained(PATH + 'pretrained-roberta-base.h5',\n config=config)\n # Sinon :\n # Roberta_model = transformers.TFRobertaModel.from_pretrained('roberta-base')\n x = Roberta_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'BERT':\n BERT_model = transformers.TFBertModel.from_pretrained('bert-base-uncased')\n x = BERT_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n else:\n logger.critical(\"unknown embedding method name : '{}'\".format(self.method_embedding))\n\n # word vectors shape : (None, maxlen, 768)\n x = x[0]\n cls_token = x[:, 0, :]\n\n model = tf.keras.models.Model(inputs=[input_ids, attention_mask, token], outputs=cls_token)\n return model", "def embedding_layer(self):\n with tf.name_scope(\"Embedding_Layer\"):\n V_size = len(self.vocab)\n embed_dim = len(self.embed[0]) \n W_embed_ = tf.get_variable(\"W_embed\",shape=[V_size, embed_dim],trainable=False).assign(np.asarray(self.embed))\n W_analogy_embed_ = tf.get_variable(\"W_analogy_embed\",shape=[V_size, embed_dim],trainable=True,initializer=tf.random_uniform_initializer(minval=-1,maxval=1))\n return W_embed_, W_analogy_embed_", "def _get_embedding_layer(self, input_data, doc_input_data):\n opts = self._options\n word_embedding = tf.Variable(tf.random_uniform((self.vocab_size, opts.embed_dim), -1.0, 1.0))\n embed = []\n\n temp = tf.zeros([opts.batch_size, opts.embed_dim])\n embed_d = []\n for n in range(opts.sentence_sample):\n temp = tf.add(temp, tf.nn.embedding_lookup(word_embedding, doc_input_data[:, n]))\n embed_d.append(temp)\n\n if opts.concat == 'True':\n combined_embed_vector_length = opts.embed_dim * opts.window_size + opts.embed_dim\n for j in range(opts.window_size):\n embed_w = tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed.append(embed_w)\n embed.append(embed_d)\n else:\n combined_embed_vector_length = opts.embed_dim\n embed_w = tf.zeros([opts.batch_size, opts.embed_dim])\n for j in range(opts.window_size):\n embed_w += tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed_w += embed_d\n embed.append(embed_w)\n\n return 
tf.concat(embed, 1), word_embedding, combined_embed_vector_length", "def get_embeddings(model, loader, device=torch.device('cpu')):\n embeddings = []\n labels = []\n for item in loader:\n data, label = item\n data = data.view(-1, 1, data.shape[-1])\n data = data.to(device)\n label = label.to(device)\n output = model(data).squeeze(1)\n\n embedding = output.cpu().data.numpy()\n label = label.cpu().data.numpy()\n embeddings.append(embedding)\n labels.append(label)\n\n embeddings = np.array(embeddings)\n labels = np.array(labels)\n\n return embeddings, labels", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n \n vocab_size = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n any_word = list(word_to_vec_map.keys())[0]\n emb_dim = word_to_vec_map[any_word].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n \n ### START CODE HERE ###\n # Step 1\n # Initialize the embedding matrix as a numpy array of zeros.\n # See instructions above to choose the correct shape.\n emb_matrix = np.zeros((vocab_size, emb_dim))\n \n # Step 2\n # Set each row \"idx\" of the embedding matrix to be \n # the word vector representation of the idx'th word of the vocabulary\n for word, idx in word_to_index.items():\n emb_matrix[idx, :] = word_to_vec_map[word]\n\n # Step 3\n # Define Keras embedding layer with the correct input and output sizes\n # Make it non-trainable.\n embedding_layer = tensorflow.keras.layers.Embedding(input_dim = vocab_size, output_dim = emb_dim, trainable = False)\n ### END CODE HERE ###\n\n # Step 4 (already done for you; please do not modify)\n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n \n # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer", "def __glove_embed__(sequence, model):\n embedded = []\n for word in sequence:\n embedded.append(model[word])\n return embedded", "def _get_embedding(self, data):\n # Tensor(n, c)\n cat = data['cat']\n return self.one_hot_embed(cat)", "def get_embed(input_data, vocab_size, embed_dim):\n embedding = tf.Variable(tf.random_uniform((vocab_size,embed_dim), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n #print (\"embed_dim: \",embed_dim) # 向量表达维度为 256\n #print (\"input_data.shape: \",input_data.shape) # (50, 5)\n #print (\"embed.shap: \", embed.shape) # word 的向量表达 ==特征 (50, 5, 256) ==(batch_size, num_step, embed_dim)\n return embed # 返回input的向量表达", "def embedding(self, images):\n predict = self.model.predict(images)\n return predict", "def gen_embedding(text, model, tokenizer):\n ### Tokenize the texts\n encoded_input = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors='pt')\n \n ### Encode the tokenized data with model\n with torch.no_grad():\n model_output = model(**encoded_input)\n \n ### Pool the outputs into a single vector\n sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])\n return sentence_embeddings", "def add_embedding(self):\n ### YOUR CODE HERE (~4-6 lines)\n embeddingTensor = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(embeddingTensor, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, self.max_length, Config.n_features * Config.embed_size])\n ### END YOUR CODE\n return embeddings", "def bert_embed(data, bert_model, BATCH_SIZE = 16, MAX_LEN = 128):\n \n dataset = TensorDataset(\n data['input_ids'], data['attention_masks'], data['indices']\n )\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=sampler, batch_size=BATCH_SIZE)\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print('Running on ' + device.type)\n if device.type == 'cuda':\n bert_model.cuda() # put bert in training mode\n \n N = data['indices'].shape[0]\n X = np.zeros((N, 768))\n pos = 0\n for batch in dataloader:\n batch = tuple(t.to(device) for t in batch)\n b_input_ids, b_input_masks, b_indices = batch\n \n with torch.no_grad():\n embeddings = bert_model(\n b_input_ids.view(-1, MAX_LEN),\n b_input_masks.view(-1, MAX_LEN)\n )[2]\n # Take the mean of the last 4 hidden states\n embeddings = (embeddings[-4] + embeddings[-3] + embeddings[-2] + embeddings[-1])/4\n for j, label_ind in enumerate(b_indices.cpu().detach().numpy()):\n X[pos,:] = embeddings[j, int(label_ind), :].cpu().detach().numpy()\n pos+=1\n return X", "def keras_model_fn(_, config):\n\n f = open(config[\"embeddings_path\"],encoding='utf8')\n glove = f.readlines()[:config[\"embeddings_dictionary_size\"]]\n f.close()\n\n embedding_matrix = np.zeros((config[\"embeddings_dictionary_size\"], config[\"embeddings_vector_size\"]))\n for i in range(config[\"embeddings_dictionary_size\"]):\n if len(glove[i].split()[1:]) != config[\"embeddings_vector_size\"]:\n continue\n embedding_matrix[i] = np.asarray(glove[i].split()[1:], dtype='float32')\n\n cnn_model = tf.keras.Sequential()\n cnn_model.add(layers.Embedding(weights=[embedding_matrix],\n input_dim=config['embeddings_dictionary_size'],\n output_dim=config['embeddings_vector_size'],\n input_length=config['padding_size']))\n cnn_model.add(layers.Conv1D(filters=100,kernel_size=2,padding='valid',activation='relu',strides=1))\n 
cnn_model.add(layers.GlobalMaxPooling1D())\n cnn_model.add(layers.Dense(100, activation='relu'))\n cnn_model.add(layers.Dense(1, activation = 'sigmoid'))\n cnn_model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return cnn_model", "def get_embedding_output(self):\n return self.embedding_output", "def model(input_shape, model,model2,model3, word_to_index):\n \n ### START CODE HERE ###\n # Define sentence_indices as the input of the graph.\n # It should be of shape input_shape and dtype 'int32' (as it contains indices, which are integers).\n sentence_indices = Input(input_shape,dtype='int32')\n \n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\n embedding_layer, ignored_words = pretrained_embedding_layer(model,model2,model3,word_to_index,300)\n \n # Propagate sentence_indices through your embedding layer\n # (See additional hints in the instructions).\n embeddings = embedding_layer(sentence_indices)\n \n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\n # The returned output should be a batch of sequences.\n X = LSTM(units=128,input_shape=input_shape,return_sequences=True)(embeddings)\n # Add dropout with a probability of 0.5\n X = Dropout(rate=0.5)(X)\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\n # The returned output should be a single hidden state, not a batch of sequences.\n X = LSTM(units=128,input_shape=input_shape,return_sequences=False)(X)\n # Add dropout with a probability of 0.5\n X = Dropout(rate=0.5)(X)\n # Propagate X through a Dense layer with 5 units\n X = Dense(units=num_classes)(X)\n# X = Dense(6, activation='softmax')\n # Add a softmax activation\n# print(X)\n# print(type(X))\n# print(X.shape)\n# print(sum(X))\n X = Activation('softmax')(X)\n \n # Create Model instance which converts sentence_indices into X.\n model = Model(inputs=sentence_indices,outputs=X)\n \n return model", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.pretrained_word_mat = tf.get_variable(\"word_emb_mat\",\n [self.vocab.word_size() - 2, self.vocab.word_embed_dim],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[2:],\n dtype=tf.float32),\n trainable=False)\n self.word_pad_unk_mat = tf.get_variable(\"word_unk_pad\",\n [2, self.pretrained_word_mat.get_shape()[1]],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[:2],\n dtype=tf.float32),\n trainable=True)\n\n self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0)\n self.p_emb = tf.nn.embedding_lookup(self.word_mat, self.p)\n self.q_emb = tf.nn.embedding_lookup(self.word_mat, self.q)", "def _get_embedding(self, data):\n embedding_list = [super()._get_embedding(data)]\n context = data['context']\n for i in range(context.shape[1]):\n embedding_list.append(getattr(self, f'context{i}')(context[:, i:i+1]))\n return torch.cat(embedding_list, dim=1)", "def test_extract_embeddings():\n with pytest.raises(OSError):\n model = BERTopic(bert_model='not_a_model')\n model._extract_embeddings([\"Some document\"])\n\n # model = BERTopic(bert_model='distilbert-base-nli-mean-tokens')\n # embeddings = model._extract_embeddings([\"Some document\"])\n #\n # assert isinstance(embeddings, np.ndarray)\n # assert embeddings.shape == (1, 768)", "def get_embedding_model_params(self, output_dict):\n output_dict['model_params'] = self.trained_model_params", "def get_embedding(self, resp):\n\n feed_dict = {self.anchor: resp}\n 
embedding = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n return embedding", "def forward(self, input_sentence):\n sentence = self.word_embedding(input_sentence)\n embedding = self.encoder(sentence)\n return embedding", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n emb_dim = word_to_vec_map[\"cucumber\"].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n emb_matrix = np.zeros((vocab_len, emb_dim)) # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)\n for word, index in word_to_index.items(): # Set each row \"index\" of the embedding matrix to be the word vector representation of the \"index\"th word of the vocabulary\n emb_matrix[index, :] = word_to_vec_map[word]\n embedding_layer = Embedding(vocab_len, emb_dim, trainable = False) # Define Keras embedding layer with the correct output/input sizes, make it trainable. Use Embedding(...). Make sure to set trainable=False. \n embedding_layer.build((None,)) # Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the \"None\".\n embedding_layer.set_weights([emb_matrix]) # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n return embedding_layer", "def get_movie_embedding(self):\n raise NotImplementedError(\"has to be overwritten\")", "def test_extract_embeddings():\n docs = [\"some document\"]\n model = BERTopic(embedding_model=\"distilbert-base-nli-stsb-mean-tokens\")\n bertopic_embeddings = model._extract_embeddings(docs)\n\n assert isinstance(bertopic_embeddings, np.ndarray)\n assert bertopic_embeddings.shape == (1, 768)\n\n sentence_embeddings = embedding_model.encode(docs, show_progress_bar=False)\n assert np.array_equal(bertopic_embeddings, sentence_embeddings)", "def embed(self, loader, model):\n print(\" ** Embedding words\")\n\n words = loader.words\n vectors = [model.get_word_vector(word) for word in words]\n\n return [(w, *v) for w, v in zip(words, vectors)]", "def compute_user_input_embedding(txt, model):\r\n embeddings = []\r\n tokens = txt.split(\" \")\r\n for word in tokens:\r\n embeddings.append(model.wv[word])\r\n sentence_embedding = compute_average(embeddings)\r\n return sentence_embedding", "def get_embed(input_data, vocab_size, embed_dim):\n # todo 需要编程:\n # 1、构建嵌入矩阵的查找表\n lookup_w = tf.Variable(\n initial_value=tf.random_uniform([vocab_size, embed_dim], -1.0, 1.0)\n )\n # 2、获得嵌入输出\n embed = tf.nn.embedding_lookup(params=lookup_w, ids=input_data)\n # [N, n_steps, embed_size]\n return embed", "def embed_data(X, model, batch_size):\n n_batch = int(np.ceil(len(X) / batch_size))\n return np.vstack(\n [\n model.embedder(\n model.encoder(np.array(X[(i) * batch_size : (i + 1) * batch_size, :]))\n )\n for i in range(n_batch)\n ]\n )", "def testEmbeddings(self):\n input_data = {\n \"x\":\n constant_op.constant(\n np.array(np.random.random_sample((20)), dtype=np.int32))\n }\n\n class EmbeddingModel(keras.Model):\n\n def __init__(self):\n super(EmbeddingModel, self).__init__()\n self.shared_weights = self.add_weight(\n \"weights\",\n shape=(2000, 300),\n dtype=dtypes.float32,\n initializer=init_ops.random_normal_initializer(\n mean=0.0, stddev=300**(-0.5)))\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=(20), dtype=dtypes.int32)\n ])\n def func(self, x):\n return array_ops.gather(self.shared_weights, x)\n\n model 
= EmbeddingModel()\n root, output_func = self._freezeModel(model.func)\n self._testConvertedFunction(root, root.f, output_func, input_data)", "def convert(encoder, bert_model):\n num_layers = encoder._config[\"num_layers\"]\n num_attention_heads = encoder._config[\"num_attention_heads\"]\n hidden_size = encoder._config[\"hidden_size\"]\n head_size = hidden_size // num_attention_heads\n assert head_size * num_attention_heads == hidden_size\n encoder._embedding_layer.set_weights(\n [bert_model[\"embeddings.word_embeddings.weight\"]])\n encoder._embedding_norm_layer.set_weights([\n bert_model[\"embeddings.LayerNorm.weight\"],\n bert_model[\"embeddings.LayerNorm.bias\"]\n ])\n encoder._type_embedding_layer.set_weights(\n [bert_model[\"embeddings.token_type_embeddings.weight\"]])\n encoder._position_embedding_layer.set_weights(\n [bert_model[\"embeddings.position_embeddings.weight\"]])\n for layer_num in range(num_layers):\n encoder._transformer_layers[\n layer_num]._attention_layer._key_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.attention.self.key.weight\"].T\n .reshape((hidden_size, num_attention_heads, head_size)),\n bert_model[f\"encoder.layer.{layer_num}.attention.self.key.bias\"]\n .reshape((num_attention_heads, head_size))\n ])\n encoder._transformer_layers[\n layer_num]._attention_layer._query_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.attention.self.query.weight\"]\n .T.reshape((hidden_size, num_attention_heads, head_size)),\n bert_model[f\"encoder.layer.{layer_num}.attention.self.query.bias\"]\n .reshape((num_attention_heads, head_size))\n ])\n encoder._transformer_layers[\n layer_num]._attention_layer._value_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.attention.self.value.weight\"]\n .T.reshape((hidden_size, num_attention_heads, head_size)),\n bert_model[f\"encoder.layer.{layer_num}.attention.self.value.bias\"]\n .reshape((num_attention_heads, head_size))\n ])\n encoder._transformer_layers[\n layer_num]._attention_layer._output_dense.set_weights([\n bert_model[\n f\"encoder.layer.{layer_num}.attention.output.dense.weight\"].T\n .reshape((num_attention_heads, head_size, hidden_size)),\n bert_model[f\"encoder.layer.{layer_num}.attention.output.dense.bias\"]\n ])\n encoder._transformer_layers[layer_num]._attention_layer_norm.set_weights([\n bert_model[\n f\"encoder.layer.{layer_num}.attention.output.LayerNorm.weight\"],\n bert_model[f\"encoder.layer.{layer_num}.attention.output.LayerNorm.bias\"]\n ])\n encoder._transformer_layers[layer_num]._intermediate_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.intermediate.dense.weight\"].T,\n bert_model[f\"encoder.layer.{layer_num}.intermediate.dense.bias\"]\n ])\n encoder._transformer_layers[layer_num]._output_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.output.dense.weight\"].T,\n bert_model[f\"encoder.layer.{layer_num}.output.dense.bias\"]\n ])\n encoder._transformer_layers[layer_num]._output_layer_norm.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.output.LayerNorm.weight\"],\n bert_model[f\"encoder.layer.{layer_num}.output.LayerNorm.bias\"]\n ])", "def get_embed(input_data, vocab_size, embed_dim):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n\n return embed", "def get_predictions(payload):\n return sm_client.invoke_endpoint(EndpointName=EMBEDDING_MODEL_ENDPOINT_NAME, \n Body=payload,\n ContentType='application/json')", "def get_embed_params(model) -> 
List:\r\n return [param for name, param in model.named_parameters() if \"embed\" in name]", "def model(mtf_features, other_features, params, mesh, variable_dtype, context=None):\n\n x, batch_dim, sequence_dim, embd_dim, vocab_dim, embed_sequence_dim = parse_inputs(mtf_features, other_features)\n\n if is_incremental_inference(context):\n # reshape inputs if in inference mode\n x = mtf.gather(x, context.position - 1, sequence_dim)\n x = mtf.reshape(x, [batch_dim])\n\n use_axial_pos_emb = params[\"axial_pos_emb\"] is not None\n\n if not use_axial_pos_emb:\n # Use standard position encoding\n wpe = mtf.get_variable(mesh, \"wpe\", mtf.Shape([embed_sequence_dim, embd_dim]),\n initializer=tf.random_normal_initializer(stddev=0.01),\n master_dtype=variable_dtype.master_dtype,\n slice_dtype=variable_dtype.slice_dtype,\n activation_dtype=variable_dtype.activation_dtype)\n else:\n wpe = axial_positional_emb(embd_dim, mesh, params, variable_dtype)\n\n # Text encoding\n wte = mtf.get_variable(mesh, \"wte\", mtf.Shape([vocab_dim, embd_dim]),\n initializer=tf.random_normal_initializer(stddev=0.02),\n master_dtype=variable_dtype.master_dtype,\n slice_dtype=variable_dtype.slice_dtype,\n activation_dtype=variable_dtype.activation_dtype)\n\n with tf.variable_scope(\"token_embd\"):\n # Text embedding\n h = mtf.gather(wte, x, vocab_dim)\n if params[\"embed_dropout\"] > 0 and params[\"mode\"] == \"train\":\n h = mtf.dropout(h, rate=params[\"embed_dropout\"], name=\"wte_dropout\")\n\n with tf.variable_scope(\"pos_embd\"):\n # Positional embedding\n position_indices = mtf.range(mesh, sequence_dim, tf.int64) if not is_incremental_inference(context) else (\n context.position - 1)\n pos_emb = mtf.gather(wpe, position_indices, wpe.shape[0])\n if params[\"embed_dropout\"] > 0 and params[\"mode\"] == \"train\":\n pos_emb = mtf.dropout(pos_emb, rate=params[\"embed_dropout\"], name=\"wte_dropout\")\n h += pos_emb\n\n aux_losses = 0 # instantiate auxiliary losses (for MOE models)\n\n for layer in range(params[\"n_layer\"]):\n # attn blocks\n share_parameters = exists(params[\"share_parameters\"]) and params[\"share_parameters\"] == True\n block_scope = f\"h{layer}\" if not share_parameters else \"\"\n\n block_fn = block(params=params, scope=block_scope, layer_num=layer,\n bias=other_features[\"attn_bias\"],\n sequence_dim=sequence_dim,\n memory_length_dim=other_features[\"memory_length_dim\"],\n variable_dtype=variable_dtype,\n context=context)\n\n # If true and in train mode, enable gradient checkpointing\n recompute_grad = params[\"recompute_grad\"] and (params[\"mode\"] == \"train\") == True\n h, loss = block_fn(h) if not recompute_grad else mtf.recompute_grad(block_fn, [h])\n aux_losses += loss\n\n no_weight_tie_emb = params[\"no_weight_tie\"] == True\n if no_weight_tie_emb:\n with tf.variable_scope(\"wte_final_linear\"):\n logits = linear(h, \"linear_out\", vocab_dim, variable_dtype=variable_dtype, params=params)\n else:\n # Layer normalize & affine transform\n h = layer_norm(h, \"ln_f\", variable_dtype=variable_dtype)\n seq_dim = sequence_dim if not is_incremental_inference(context) else mtf.Dimension(\"sequence\", 1)\n with tf.variable_scope(\"wte_final_einsum\"):\n # Equivalent to tf.matmul\n logits = mtf.einsum([h, wte], output_shape=[batch_dim, seq_dim, vocab_dim])\n\n if params[\"mode\"] in [\"train\", \"eval\"]:\n labels = mtf_features[\"labels\"]\n z_loss = params.get(\"z_loss\", 1e-4) # an auxiliary loss used to stabilize mtf xentropy\n\n # Go to full precision for the logits \n logits = mtf.cast(logits, 
tf.float32)\n\n with tf.variable_scope(\"xentropy_final\"):\n loss_batch = mtf.layers.softmax_cross_entropy_with_logits(logits=logits, targets=labels,\n vocab_dim=logits.shape[-1], z_loss=z_loss)\n\n # For non-autoregressive models (masked language modeling training)\n # Make sure labels with padding tokens are not counted in the loss\n if not params[\"causal\"]:\n padding_id = params.get(\"padding_id\", 0)\n loss_batch = mtf.where(mtf.not_equal(labels, padding_id), loss_batch, mtf.zeros_like(loss_batch))\n\n with tf.variable_scope(\"reduce_mean_final\"):\n loss = mtf.reduce_mean(loss_batch)\n\n loss += aux_losses # Add on auxiliary losses (currently only used for MoE)\n loss /= params[\"num_microbatches\"]\n # Convert to train dtype\n loss = mtf.cast(loss, variable_dtype.slice_dtype)\n else:\n loss = None\n loss_batch = None\n\n # Cast back to checkpoint dtype\n logits = mtf.cast(logits, variable_dtype.master_dtype)\n return logits, loss, loss_batch", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.word_embeddings = tf.get_variable(\n 'word_embeddings',\n shape=(self.term_vocab.size(), self.term_vocab.embed_dim),\n initializer=tf.constant_initializer(self.term_vocab.embeddings),\n trainable=True\n )\n self.p_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p)\n self.q_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q)\n\n with tf.variable_scope('char_embedding'):\n self.char_embeddings = tf.get_variable(\n 'char_embeddings',\n shape=(self.char_vocab.size(), self.char_vocab.embed_dim),\n initializer=tf.constant_initializer(self.char_vocab.embeddings),\n trainable=True\n )\n self.p_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.p_char) # [batch, seqlen, max_char_num, embedding_size]\n self.q_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.q_char)\n\n self.p_char_emb = self.cnn_emb(self.p_char_emb, \"p_emb\")\n self.q_char_emb = self.cnn_emb(self.q_char_emb, \"q_emb\")\n '''\n self.p_char_emb = tf.reshape(self.p_char_emb, [-1, self.max_char_num, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [-1, self.max_char_num, self.emb_size])\n\n self.p_char_emb = cnn_layer.conv(self.p_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=None)\n self.q_char_emb = cnn_layer.conv(self.q_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=True)\n\n self.p_char_emb = tf.reduce_max(self.p_char_emb, axis=1) # [batch*seqlen, 1, emb_size]\n self.q_char_emb = tf.reduce_max(self.q_char_emb, axis=1)\n\n batch_size = tf.shape(self.p_word_emb)[0]\n self.p_char_emb = tf.reshape(self.p_char_emb, [batch_size, -1, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [batch_size, -1, self.emb_size])\n\n self.p_char_emb = tf.nn.dropout(self.p_char_emb, 0.95)\n self.q_char_emb = tf.nn.dropout(self.q_char_emb, 0.95)\n '''\n self.p_emb = tf.concat([self.p_word_emb, self.p_char_emb], -1)\n self.q_emb = tf.concat([self.q_word_emb, self.q_char_emb], -1)", "def build_embedding_layer(inputs_, vocab_size, embed_size):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_size), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs_)\n \n return embed", "def embed(documents, ctx_encoder, ctx_tokenizer, device):\n input_ids = ctx_tokenizer(\n documents[\"title\"],\n documents[\"text\"],\n truncation=True,\n padding=\"longest\",\n return_tensors=\"pt\",\n )[\"input_ids\"]\n embeddings = ctx_encoder(\n 
input_ids.to(device=device), return_dict=True\n ).pooler_output\n return {\"embeddings\": embeddings.detach().cpu().numpy()}", "def get_model_bicond_sepembed(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout):\n\n # batch_size x max_seq_length\n inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n inputs_cond = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n\n cont_train = True\n if pretrain == \"pre\": # continue training embeddings or not. Currently works better to continue training them.\n cont_train = False\n embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1), # input_size is embeddings size\n name=\"embedding_matrix\", trainable=cont_train)\n embedding_matrix_cond = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1),\n name=\"embedding_matrix\", trainable=cont_train)\n\n\n # batch_size x max_seq_length x input_size\n embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)\n embedded_inputs_cond = tf.nn.embedding_lookup(embedding_matrix_cond, inputs_cond)\n\n\n # [batch_size x inputs_size] with max_seq_length elements\n # fixme: possibly inefficient\n # inputs_list[0]: batch_size x input[0] <-- word vector of the first word\n inputs_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs)]\n inputs_cond_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs_cond)]\n\n drop_prob = None\n if dropout:\n drop_prob = 0.1\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)\n\n start_state = tf.zeros([batch_size, lstm_encoder.state_size])\n\n ### FORWARD\n\n # [h_i], [h_i, c_i] <-- LSTM\n # [h_i], [h_i] <-- RNN\n fw_outputs, fw_states = lstm_encoder(inputs_list, start_state, \"LSTM\")\n\n # running a second LSTM conditioned on the last state of the first\n fw_outputs_cond, fw_states_cond = lstm_encoder(inputs_cond_list, fw_states[-1],\n \"LSTMcond\")\n\n fw_outputs_fin = fw_outputs_cond[-1]\n\n ### BACKWARD\n bw_outputs, bw_states = lstm_encoder(inputs_list[::-1], start_state, \"LSTM_bw\")\n bw_outputs_cond, bw_states_cond = lstm_encoder(inputs_cond_list[::-1], bw_states[-1],\n \"LSTMcond_bw\")\n bw_outputs_fin = bw_outputs_cond[-1]\n\n outputs_fin = tf.concat(1, [fw_outputs_fin, bw_outputs_fin])\n\n\n if tanhOrSoftmax == \"tanh\":\n model = Projector(target_size, non_linearity=tf.nn.tanh, bias=True)(outputs_fin) # tf.nn.softmax\n else:\n model = Projector(target_size, non_linearity=tf.nn.softmax, bias=True)(outputs_fin) # tf.nn.softmax\n\n return model, [inputs, inputs_cond]", "def embed(query: str) -> dict:\n embedding = model.embed(query)\n return {\"embedding\": embedding, \"model\": model_name}", "def getEmbeddings(model, words):\n\tembeddings = {}\n\tfor word in words:\n\t\tembeddings[word] = model[word]\n\treturn embeddings", "def loadEmbModel(embFile, logger):\n logger.info(\"Loading Embedding Model\")\n f = open(embFile,'r')\n model = {}\n v = []\n for line in f:\n splitLine = line.split(' ')\n word = splitLine[0]\n try:\n embedding = np.array([float(val) for val in splitLine[1:]])\n except:\n logger.info(len(v), line)\n model[word] = embedding\n v.append(embedding)\n mean = np.array(v).mean(0)\n logger.info(mean.shape)\n model['<unk>'] = torch.tensor(mean)\n model['<pad>'] = torch.zeros(embedding.shape)\n model['<start>'] = torch.zeros(embedding.shape)\n model['<end>'] = torch.zeros(embedding.shape)\n logger.info(\"Done.\",len(model),\" words 
loaded!\")\n return model", "def get_embeddings(self, data):\n raise NotImplementedError()", "def _add_embedding_layer(model_1, model_2):\n result_layer = torch.nn.Embedding(\n model_1.num_embeddings, model_1.embedding_dim + model_2.embedding_dim\n )\n result_layer.weight = torch.nn.Parameter(\n torch.cat((model_1.weight.data, model_2.weight.data), dim=1)\n )\n return result_layer", "def keras_model_fn(model_config, vocab_size, embedding_size, embeddings):\n ## hyperparams\n model_name = model_config['model_name']\n num_class = model_config['num_class']\n lstm_hs = model_config['lstm_hs']\n gru_hs = model_config['gru_hs']\n learning_rate = model_config['learning_rate']\n \n ## build model - , weights=[embeddings[1]]\n inputs = ks.Input(shape=(None,), dtype='int32', name='inputs')\n embedded_sequences_ft1 = layers.Embedding(vocab_size, embedding_size, trainable = True, mask_zero = False)(inputs)\n embedded_sequences_ft2 = layers.Embedding(vocab_size, embedding_size, trainable = True, mask_zero = False)(inputs)\n concat_embed = layers.concatenate([embedded_sequences_ft1 ,embedded_sequences_ft2])\n concat_embed = layers.SpatialDropout1D(0.5)(concat_embed)\n x = layers.Bidirectional(layers.CuDNNLSTM(lstm_hs, return_sequences = True))(concat_embed)\n x, x_h, x_c = layers.Bidirectional(layers.CuDNNGRU(gru_hs, return_sequences = True, return_state = True))(x)\n x_1 = layers.GlobalMaxPool1D()(x)\n x_2 = layers.GlobalAvgPool1D()(x)\n x_out = layers.concatenate([x_1 ,x_2, x_h])\n x_out = layers.BatchNormalization()(x_out)\n outputs = layers.Dense(num_class, activation = 'softmax', name = 'outputs')(x_out) # outputs\n model = ks.Model(inputs, outputs, name = model_name)\n \n ## compile\n model.compile(loss = 'categorical_crossentropy', \n optimizer = ks.optimizers.Adam(lr=learning_rate, clipnorm=.25, beta_1=0.7, beta_2=0.99), \n metrics = ['categorical_accuracy', ks.metrics.TopKCategoricalAccuracy(k=3)]) # metric what?\n return model", "def forward(self, tgt, m, enc_embed, mask):\n bs = tgt.shape[0]\n enc_embed = enc_embed.permute(2, 0, 1)\n m = m.permute(2, 0, 1)\n tgt = tgt.permute(2, 0, 1)\n dec_embed = self.dec_embed.weight.unsqueeze(1).repeat(1, bs, 1)\n\n out = tgt\n for layer in self.decoder_layers:\n out = layer(out, m, \n pos=enc_embed,\n query_pos=dec_embed\n )\n \n return self.decoder_norm(out).permute(1, 2, 0), dec_embed.permute(1, 2, 0)", "def get_embedding(self, task_embedding, layer_id):\n layer_id_tensor = torch.tensor([layer_id], dtype=torch.long, device=self.device)\n layer_embedding = self.layer_id_embeddings(layer_id_tensor)\n layer_embedding = layer_embedding.view(-1)\n segment_id = torch.tensor([0, 1], dtype=torch.long, device=self.device)\n embeddings = torch.cat([task_embedding.view(1, -1), layer_embedding.view(1, -1)], axis=0)\n embeddings += self.token_type_embeddings(segment_id)\n embeddings = self.task_hypernet(embeddings.view(-1))\n if self.unique_hyper_net_layer_norm:\n embeddings = self.LayerNorm(embeddings)\n return embeddings", "def add_embedding(self):\n #with tf.variable_scope(\"RNN\", reuse = tf.AUTO_REUSE):\n embeddings = tf.get_variable(\"embeddings\", initializer = self.pretrained_embeddings,trainable=True)\n inputs = self.input_placeholder\n inputs = tf.reshape(inputs, [self.config.batch_size, -1 , self.config.n_features])\n embeddings = tf.nn.embedding_lookup(embeddings, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [self.config.batch_size, -1, self.config.n_features* self.config.embed_size])\n embeddings = tf.cast(embeddings, tf.float32)\n 
return embeddings", "def initialize_model(self):\n\n input_layer = Input(\n shape=(self.input_length,), \n dtype='int32', \n name='input'\n )\n\n if self.embedding_matrix is None:\n embedding = Embedding(\n output_dim=self.embedding_size,\n input_dim=self.vocabulary_size + 1, # for mask\n input_length=self.input_length,\n mask_zero=True,\n name='embedding'\n )(input_layer)\n else:\n embedding = Embedding(\n output_dim=self.embedding_size,\n input_dim=self.vocabulary_size + 1,\n input_length=self.input_length,\n mask_zero=True,\n weights=[np.vstack((np.zeros((1, self.embedding_size)),\n self.embedding_matrix))],\n name='embedding'\n )(input_layer)\n\n encoder = self.recurrent_cell(\n self.latent_dim,\n dropout=self.dropout,\n recurrent_dropout=self.dropout,\n name='encoder',\n recurrent_regularizer=l1_l2(*self.regularization)\n )\n\n if self.use_bidirection:\n encoder = Bidirectional(\n encoder,\n merge_mode='concat'\n )\n\n encoder = encoder(embedding)\n\n dense_1 = Dense(\n 1024,\n activation='tanh',\n name='dense_1',\n kernel_regularizer=l1_l2(*self.regularization)\n )(encoder)\n\n dense_2 = Dense(\n 512,\n activation='tanh',\n name='dense_2',\n kernel_regularizer=l1_l2(*self.regularization)\n )(dense_1)\n\n dropout = Dropout(self.dropout)(\n dense_2\n )\n\n prediction = Dense(\n 1,\n activation='sigmoid',\n name='prediction'\n )(dropout)\n\n model = Model(inputs=input_layer, outputs=prediction)\n\n # sparse_categorical_crossentropy\n model.compile(optimizer=Adam(lr=self.learning_rate),\n loss='binary_crossentropy',\n metrics=['acc'])\n\n self.model = model\n\n if self.verbose > 0:\n model.summary()\n\n return [model]", "def embeddings_layer(x, Wemb, dim_proj):\n\n n_words = x.shape[0]\n n_max_letters_in_word = x.shape[1]\n n_batch = x.shape[2]\n\n dist = Wemb[x.flatten()].reshape([n_words, n_max_letters_in_word, n_batch, dim_proj])\n return dist", "def encode_input_for_decoder(x_tensor, inp_lens_tensor, model_input_emb: EmbeddingLayer, model_enc: RNNEncoder):\n input_emb = model_input_emb.forward(x_tensor)\n (enc_output_each_word, enc_context_mask, enc_final_states) = model_enc.forward(input_emb, inp_lens_tensor)\n enc_final_states_reshaped = (enc_final_states[0].unsqueeze(0), enc_final_states[1].unsqueeze(0))\n return (enc_output_each_word, enc_context_mask, enc_final_states_reshaped)", "def embedding_trainable_variables(self) -> Sequence[tf.Variable]:\n return self._embedding_layer.trainable_variables", "def embedded(self, word_ids, embedding_tensor, scope=\"embedding\"):\n with tf.variable_scope(scope):\n with tf.device(\"/cpu:0\"):\n inputs = tf.nn.embedding_lookup(embedding_tensor, word_ids)\n return inputs", "def get_embeddings():\n # Load the raw embedding data\n X_train = np.load('./train_embeddings.npy')\n \n y_train = np.load('./train_labels.npy')\n \n X_valid = np.load('./valid_embeddings.npy')\n \n y_valid = np.load('./valid_labels.npy')\n \n X_test = np.load('./test_embeddings.npy')\n \n y_test = np.load('./test_labels.npy')\n\n #return X_train, y_train\n return X_train, y_train, X_valid, y_valid, X_test, y_test", "def embedding(org_input):\n # Create the embedding list\n for f in range(Config.num_feature):\n num_cat_value = Config.schema[f]\n\n if num_cat_value == 1:\n pass\n elif num_cat_value > 1:\n embed_dict[f] = tf.get_variable(\n name=\"embed_\" + str(f),\n shape=[num_cat_value, Config.embed_size[f]],\n trainable=True)\n else:\n raise ValueError(\"Schema values should be positive integers!\")\n\n # Create embedded inputs\n f_size = np.sum(Config.embed_size)\n 
embedded_input = embed_events(org_input, f_size)\n\n return embedded_input", "def Aut(A):\n return Embeddings(A,A)", "def embedding_setup(self, embedding, emb_trainable):\n if emb_trainable == True:\n emb_variable = tf.get_variable(\n name=\"embedding_matrix\", shape=embedding.shape,\n initializer = tf.constant_initializer(embedding))\n return emb_variable\n else:\n return embedding", "def _find_embeddings(snlp):\n embs = None\n for proc in snlp.processors.values():\n if hasattr(proc, \"pretrain\") and isinstance(proc.pretrain, Pretrain):\n embs = proc.pretrain\n break\n return embs", "def get_pretrained_embeddings(source_vocab,embed_df):\r\n \r\n num_tokens = len(source_vocab)\r\n embedding_dim = embed_df.shape[1]\r\n weights = np.zeros((num_tokens,embedding_dim),dtype=np.float32)\r\n \r\n for idx in range(num_tokens):\r\n token = source_vocab.lookup_index(idx)\r\n if token in embed_df.index:\r\n weights[idx,:] = embed_df.loc[token]\r\n else:\r\n weights[idx,:] = np.random.randn(1,embedding_dim)\r\n \r\n embed_tensor = torch.FloatTensor(weights)\r\n return embed_tensor", "def forward(self, input_variable):\r\n return self.embedding(input_variable)", "def build_model(allidx,MAX_LENGTH,onlyArg):\n wordidx, labelidx, featuresidx, extraidx=allidx\n posidx, neridx, depidx, distanceidx, chnkidx, wikineridx, dbpedianeridx, subneridx = featuresidx\n\n main_input = Input(shape=(MAX_LENGTH,), name='main_input', dtype='int32')\n inputNodes=[main_input]\n\n w2vmodel=\"../embeddings/Domain-Word2vec.model\"\n\n embedding_matrix,EMBEDDING_DIM,vocabulary_size=prepare.wv_embedded(wordidx,w2vmodel)\n \n x = Embedding(output_dim=EMBEDDING_DIM, weights=[embedding_matrix],input_dim=vocabulary_size, input_length=MAX_LENGTH, mask_zero=False)(main_input)\n numnode=int(EMBEDDING_DIM/2)\n\n # pos embedding\n inputNodes,pos_layer=layers.embedlayer(inputNodes,\"pos_input\",posidx,MAX_LENGTH)\n x=Concatenate()([x,pos_layer])\n numnode+=int(len(posidx)/2)\n\n # ner embedding\n inputNodes,ner_layer=layers.embedlayer(inputNodes,\"ner_input\",neridx,MAX_LENGTH)\n x=Concatenate()([x,ner_layer])\n numnode+=int(len(neridx)/2)\n\n inputNodes,wikiner_layer=layers.embedlayer(inputNodes,\"wikiner_input\",wikineridx,MAX_LENGTH)\n x=Concatenate()([x,wikiner_layer])\n numnode+=int(len(wikineridx)/2)\n\n inputNodes,dbpedianer_layer=layers.embedlayer(inputNodes,\"dbpedianer_input\",dbpedianeridx,MAX_LENGTH)\n x=Concatenate()([x,dbpedianer_layer])\n numnode+=int(len(dbpedianeridx)/2)\n\n # dep embedding\n inputNodes,dep0_layer=layers.embedlayer(inputNodes,\"dep0_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep0_layer])\n numnode+=int(len(depidx)/2)\n\n inputNodes,dep1_layer=layers.embedlayer(inputNodes,\"dep1_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep1_layer])\n numnode+=int(len(depidx)/2)\n\n inputNodes,dep2_layer=layers.embedlayer(inputNodes,\"dep2_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep2_layer])\n numnode+=int(len(depidx)/2)\n\n # chnk embedding\n inputNodes,lvl_layer=layers.embedlayer(inputNodes,\"lvl_input\",distanceidx,MAX_LENGTH)\n x=Concatenate()([x,lvl_layer])\n numnode+=int(len(distanceidx)/2)\n\n inputNodes,chnk_layer=layers.embedlayer(inputNodes,\"chnk_input\",chnkidx,MAX_LENGTH)\n x=Concatenate()([x,chnk_layer])\n numnode+=int(len(chnkidx)/2)\n\n # wikiclass embedding\n inputNodes,subner_layer=layers.embedlayer(inputNodes,\"subner_input\",subneridx,MAX_LENGTH)\n x=Concatenate()([x,subner_layer])\n numnode+=int(len(subneridx)/2)\n\n if onlyArg:\n neartrigger_input = Input(shape=(MAX_LENGTH,), 
name='neartrigger_input', dtype='int32')\n inputNodes.append(neartrigger_input)\n neartrigger_layer = Embedding(output_dim=EMBEDDING_DIM, weights=[embedding_matrix],input_dim=vocabulary_size, \\\n input_length=MAX_LENGTH, mask_zero=False)(neartrigger_input)\n x=Concatenate()([x,neartrigger_layer])\n numnode+=50\n inputNodes,x,numnode=layers.extralayer(inputNodes,x,numnode,extraidx,featuresidx,MAX_LENGTH)\n\n lstm_out = Bidirectional(LSTM(numnode, dropout=0.2,return_sequences=True))(x)\n numnode=int((numnode+len(labelidx))*2/3)\n\n if onlyArg:\n lstm_out = SeqSelfAttention(attention_activation='tanh', attention_width=5)(lstm_out)\n\n lstm_out = Dropout(0.2)(lstm_out)\n out = Dense(numnode)(lstm_out)\n\n crf = CRF(len(labelidx), sparse_target=False) # CRF layer\n main_output=crf(out)\n loss=crf_loss #crf.loss_function\n acc=[crf_accuracy]\n\n model = Model(inputs=inputNodes, outputs=main_output) \n model.compile(loss=loss,optimizer=Adam(0.001),metrics=acc)\n model.summary()\n\n return model", "def forward(self, input):\n if isinstance(input, tuple):\n check_input = input[0]\n else:\n check_input = input\n in_length, in_batch, nfeat = check_input.size()\n aeq(nfeat, len(self.emb_luts))\n\n emb = self.make_embedding(input)\n\n out_length, out_batch, emb_size = emb.size()\n aeq(in_length, out_length)\n aeq(in_batch, out_batch)\n aeq(emb_size, self.embedding_size)\n\n return emb", "def get_embeddings(faces):\n\t# convert into an array of samples\n\tsamples = np.asarray(faces, 'float32')\n\t# prepare the face for the model, e.g. center pixels\n\tsamples = preprocess_input(samples, version=2)\n\t# create a vggface model\n\tmodel = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3), pooling='avg')\n\t# perform prediction\n\tyhat = model.predict(samples)\n\treturn yhat", "def _read_adv_embeddings(identity, target):\n embeddings_file = os.path.join(\n FLAGS.output_directory,\n identity,\n FLAGS.attack_type,\n target\n )\n embeddings_file = os.path.join(FLAGS.image_directory,\n identity,\n 'embeddings.h5')\n with h5py.File(embeddings_file, 'r') as f:\n return f['embeddings'][:].astype(np.float32)", "def create_embedding_matrix(self):\n self.id2word = dict([(self.vocab[word]['id'], word) for word in self.vocab])\n vocab_size = len(self.vocab)\n result = np.zeros((vocab_size, self.embed_dim))\n unknown_token_set = set()\n\n found_words = 0\n avg = np.zeros(self.embed_dim)\n for _ in range(1, vocab_size): # skip PAD embedding (initialize as zero embedding)\n try:\n result[_] = self.pretrained_embedding[self.id2word[_]]\n avg += result[_]\n found_words += 1\n except:\n unknown_token_set.add(_)\n\n avg /= found_words\n for _ in unknown_token_set:\n result[_] = avg\n self.embedding = result\n return found_words, len(self.id2word)", "def __init__(self, input_length, EMBEDDING_DIM, MAX_NB_WORDS, MAX_SEQUENCE_LENGTH, EPOCH_SIZE, BATCH_SIZE):\n self.BATCH_SIZE = BATCH_SIZE\n self.EPOCH_SIZE = EPOCH_SIZE\n \n model = Sequential()\n model.add(layers.Input(input_length))\n model.add(layers.Embedding(input_dim=MAX_NB_WORDS, output_dim=EMBEDDING_DIM, input_length=input_length))\n model.add(layers.SpatialDropout1D(0.25))\n model.add(layers.Bidirectional(layers.GRU(50, return_sequences=True)))\n model.add(layers.Convolution1D(100, 3, activation=\"relu\"))\n model.add(layers.Dense(50, activation=\"relu\"))\n model.add(layers.Dropout(0.2))\n model.add(layers.Dense(1, activation=\"sigmoid\"))\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[\"accuracy\", Precision(), 
Recall()])\n\n model.summary()\n\n self.model = model", "def embedd_data(training_data_text, e_arr, e_dict):\n num_samples = len(training_data_text)\n embedded = np.zeros([num_samples, MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE])\n for i in range(num_samples):\n review_mat = np.zeros([MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE])\n # Iterate to either the end of the sentence of the max num of words, whichever is less\n for w in range(min(len(training_data_text[i]), MAX_WORDS_IN_REVIEW)):\n # assign embedding of that word or to the UNK token if that word isn't in the dict\n review_mat[w] = e_arr[e_dict.get(training_data_text[i][w], 0)]\n embedded[i] = review_mat\n return embedded", "def embeddings(self):\n self._ensure_is_connected()\n return self._embeddings", "def _get_bert_embeddings(data_generator, embedding_model: torch.nn.Module, metadata: False): \n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n \n with torch.set_grad_enabled(False):\n embeddings = {'ids': [],\n 'embeddings': [],\n 'labels': []\n }\n \n # get BERT training embeddings\n \n if metadata:\n for local_ids, local_data, local_meta, local_labels in data_generator:\n local_data, local_meta, local_labels = local_data.to(device).long().squeeze(1), \\\n local_meta, \\\n local_labels.to(device).long()\n\n #print(local_data[0].shape)\n augmented_embeddings = embedding_model(local_data, local_meta)\n\n embeddings['ids'].extend(np.array(local_ids))\n embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))\n embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))\n else:\n for local_ids, local_data, local_labels in data_generator:\n local_data, local_labels = local_data.to(device).long().squeeze(1), \\\n local_labels.to(device).long()\n #print(local_data[0].shape)\n augmented_embeddings = embedding_model(local_data)\n\n embeddings['ids'].extend(np.array(local_ids))\n embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))\n embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))\n \n return embeddings", "def handle_embeddings_request(model_name):\n if model_name in app.models:\n pipeline = app.models[model_name][\"object\"]\n if request.method == \"GET\":\n params = request.args.to_dict(flat=False)\n indices = params[\"resource_ids\"]\n embeddings = pipeline.retrieve_embeddings(indices)\n return (\n json.dumps({\n \"vectors\": dict(zip(indices, embeddings))\n }), 200,\n {'ContentType': 'application/json'}\n )\n else:\n if pipeline.is_inductive():\n auth_token = _retrieve_token(request)\n content = request.get_json()\n data = content[\"data\"]\n data_type = (\n content[\"data_type\"]\n if \"data_type\" in content else \"raw\"\n )\n preprocessor_kwargs = (\n content[\"preprocessor_kwargs\"]\n if \"preprocessor_kwargs\" in content else None\n )\n embedder_kwargs = (\n content[\"embedder_kwargs\"]\n if \"embedder_kwargs\" in content else None\n )\n data = _preprocess_data(data, data_type, auth_token)\n vectors = pipeline.run_prediction(\n data, preprocessor_kwargs, embedder_kwargs)\n\n if not isinstance(vectors, list):\n vectors = vectors.tolist()\n\n return (\n json.dumps({\"vectors\": vectors}), 200,\n {'ContentType': 'application/json'}\n )\n else:\n _respond_not_allowed(\n \"Model is transductive, prediction of \"\n \"embedding for unseen data is not supported\")\n\n else:\n return _respond_not_found()", "def set_embeddings(self):", "def createTheModel(vocabulary, window=configuration['mlp']['posWindow']):\n 
inputLayers, interLayers = [], []\n inputToken = Input((3 + configuration['embedding']['useB1'] + configuration['embedding']['useB-1'],))\n inputLayers.append(inputToken)\n tokenEmb = Embedding(len(vocabulary.tokenIndices), configuration['mlp']['tokenEmb'],\n trainable=configuration['mlp']['trainable'])(inputToken)\n tokenFlatten = Flatten()(tokenEmb)\n interLayers.append(tokenFlatten)\n posNum = (2 * window + 1) * (3 + configuration['embedding']['useB1'] + configuration['embedding']['useB-1'])\n inputPos = Input((posNum,))\n inputLayers.append(inputPos)\n posEmb = Embedding(len(vocabulary.posIndices), configuration['mlp']['posEmb'],\n trainable=configuration['mlp']['trainable'])(inputPos)\n posFlatten = Flatten()(posEmb)\n interLayers.append(posFlatten)\n\n interLayers = keras.layers.concatenate(interLayers)\n lastLayer = Dense(configuration['mlp']['dense1UnitNumber'],\n activation=configuration['nn']['dense1Activation'])(interLayers)\n # dropout=configuration['mlp']['dense1Dropout'])(interLayers)\n lastLayer = Dropout(configuration['mlp']['dense1Dropout'])(lastLayer)\n softmaxLayer = Dense(8 if enableCategorization else 4, activation='softmax')(lastLayer)\n return inputLayers, softmaxLayer", "def get_item_embeddings(model: Word2Vec) -> np.ndarray:\n logger.info(\"Getting item embeddings.\")\n item_embeddings = model.wv.get_normed_vectors()\n item_embeddings = np.array(item_embeddings)\n return item_embeddings", "def embed(x, phase):\r\n\r\n is_train = True if phase == 'train' else False\r\n\r\n # Input embedding: convert input vector to dimension of hp.hidden_units.\r\n embs = input_embedding(x, num_units=hp.hidden_units, embed_type=hp.embed_type)\r\n print('Size after input embedding: ', embs.get_shape())\r\n\r\n # Positional Encoding.\r\n embs += embedding(tf.tile(tf.expand_dims(tf.range(tf.shape(x)[1]), 0), [tf.shape(x)[0], 1]),\r\n vocab_size=hp.win_len, num_units=hp.hidden_units,\r\n zero_pad=False, scale=False, scope=\"enc_pe\")\r\n print(\"Size after positional encoding: \", embs.get_shape())\r\n\r\n # Attention blocks.\r\n for i in range(hp.num_blocks):\r\n with tf.variable_scope(\"num_blocks_{}\".format(i)):\r\n # Multi-head Attention\r\n embs = multihead_attention(queries=embs, keys=embs, num_units=hp.hidden_units,\r\n num_heads=hp.num_heads, dropout_rate=hp.dropout_rate,\r\n is_training=is_train, causality=False)\r\n\r\n # Feed Forward\r\n embs = feedforward(embs, num_units=[2 * hp.hidden_units, hp.hidden_units])\r\n print(\"Size after multi-head_attention: \", embs.get_shape())\r\n\r\n # Temporal pooling by averaging on the time dimension.\r\n embs = tf.reduce_mean(embs, axis=1)\r\n\r\n return embs", "def get_embeddings():\n embeddings = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE))\n return embeddings", "def get_sentence_embedding(sentence):\n tokenized_review, segments_ids, indexed_tokens = process(sentence)\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segments_ids])\n if useGPU:\n tokens_tensor = tokens_tensor.to(\"cuda\")\n segments_tensors = segments_tensors.to(\"cuda\")\n\n # get token embeddings\n with torch.no_grad():\n output = model(tokens_tensor, token_type_ids=segments_tensors)\n\n # get pooled embedding using specified method\n if sentence_embedding_type == \"CLS\":\n return output.pooler_output\n elif sentence_embedding_type == \"avg\":\n return output.last_hidden_state.mean(axis=1)\n else:\n return None", "def embedding_layer(n_categories, embedding_dim, name=None):\n\n input_tensor = 
Input(shape=(1,))\n x = Embedding(n_categories, embedding_dim, name=name)(input_tensor)\n x = Reshape(target_shape=(embedding_dim,))(x)\n\n return input_tensor, x", "def __init__(self, embed_size, vocab):\n super(ModelEmbeddings, self).__init__()\n \n self.embed_size = embed_size\n self.char_embed_size = 50\n self.max_word_len = 21\n self.dropout_rate = 0.3\n self.vocab = vocab \n \n ## A4 code\n pad_token_idx = vocab.char2id['<pad>']\n self.embedding = nn.Embedding(num_embeddings =len(vocab.char2id),\n embedding_dim =self.char_embed_size,\n padding_idx =pad_token_idx,)\n \n self.CNN = CNN(char_embed_size=self.char_embed_size,\n num_filters=embed_size,\n max_word_length=self.max_word_len,)\n self.Highway = Highway(word_embed_size=self.embed_size)\n self.dropout = nn.Dropout(p=self.dropout_rate)\n ## End A4 code\n\n ### YOUR CODE HERE for part 1j\n\n\n ### END YOUR CODE", "def fit_transform(self, *args, **kwargs) -> np.ndarray:\n self.fit(*args, **kwargs)\n return self.embedding_", "def build_bilstm(self, verbose=True):\r\n word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')\r\n inputs = [word_ids]\r\n\r\n if self._params.use_pretrain_embedding:\r\n if verbose: logging.info(\"initial word embedding with pretrained embeddings\")\r\n if self._params.word_embedding_dim == 100:\r\n glove_file = self._params.data_dir + '/glove.6B.100d.txt'\r\n elif self._params.word_embedding_dim == 300:\r\n glove_file = self._params.data_dir + '/glove.42B.300d.txt'\r\n else:\r\n logging.error(\"we only support glove embedding with dimension 100 or 300\")\r\n raise ValueError(\"unmatch word dimension, we only support glove embedding with dimension 100 or 300\")\r\n glove_embedding_index = load_glove(glove_file, self._params.word_embedding_dim)\r\n word_vocab = self.input_processor.word_vocab.vocab\r\n glove_embeddings_matrix = np.zeros([len(word_vocab), self._params.word_embedding_dim])\r\n for word, i in word_vocab.items():\r\n vector = glove_embedding_index.get(word)\r\n if vector is not None:\r\n glove_embeddings_matrix[i] = vector\r\n \r\n word_embeddings = Embedding(input_dim=glove_embeddings_matrix.shape[0],\r\n output_dim=glove_embeddings_matrix.shape[1],\r\n trainable=False,\r\n mask_zero=True,\r\n weights=[glove_embeddings_matrix],\r\n name='word_embedding')(word_ids)\r\n else:\r\n word_embeddings = Embedding(input_dim=self._params.word_vocab_size,\r\n output_dim=self._params.word_embedding_dim,\r\n mask_zero=True,\r\n name='word_embedding')(word_ids)\r\n\r\n input_embeddings = [word_embeddings]\r\n if self._params.use_char:\r\n char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')\r\n inputs.append(char_ids)\r\n if self._params.char_feature == \"lstm\":\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n mask_zero=True,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level lstm features\")\r\n char_feas = TimeDistributed(Bidirectional(LSTM(self._params.char_lstm_size)), name=\"char_lstm\")(char_embeddings)\r\n elif self._params.char_feature == \"cnn\":\r\n # cnn do not support mask\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level cnn features\")\r\n char_feas = char_cnn_encode(char_embeddings, self._params.n_gram_filter_sizes, self._params.n_gram_filter_nums)\r\n else:\r\n raise 
ValueError('char feature must be lstm or cnn')\r\n\r\n input_embeddings.append(char_feas)\r\n\r\n if self._params.use_pos:\r\n if verbose: logging.info(\"use pos tag features\")\r\n pos_ids = Input(batch_shape=(None, None), dtype='int32', name='pos_input')\r\n inputs.append(pos_ids)\r\n\r\n\r\n pos_embeddings = Embedding(input_dim=self._params.pos_vocab_size,\r\n output_dim=self._params.pos_embedding_dim,\r\n mask_zero=True,\r\n name='pos_embedding')(pos_ids)\r\n input_embeddings.append(pos_embeddings)\r\n\r\n if self._params.use_dict:\r\n if verbose: logging.info(\"use user dict features\")\r\n dict_ids = Input(batch_shape=(None, None), dtype='int32', name='dict_input')\r\n inputs.append(dict_ids)\r\n\r\n dict_embeddings = Embedding(input_dim=self._params.dict_vocab_size,\r\n output_dim=self._params.dict_embedding_dim,\r\n mask_zero=True,\r\n name='dict_embedding')(dict_ids)\r\n input_embeddings.append(dict_embeddings)\r\n\r\n input_embedding = Concatenate(name=\"input_embedding\")(input_embeddings) if len(input_embeddings)>1 else input_embeddings[0]\r\n input_embedding_ln = LayerNormalization(name='input_layer_normalization')(input_embedding)\r\n #input_embedding_bn = BatchNormalization()(input_embedding_ln)\r\n input_embedding_drop = Dropout(self._params.dropout, name=\"input_embedding_dropout\")(input_embedding_ln)\r\n\r\n z = Bidirectional(LSTM(units=self._params.main_lstm_size, return_sequences=True, dropout=0.2, recurrent_dropout=0.2),\r\n name=\"main_bilstm\")(input_embedding_drop)\r\n z = Dense(self._params.fc_dim, activation='tanh', name=\"fc_dense\")(z)\r\n\r\n if self._params.use_crf:\r\n if verbose: logging.info('use crf decode layer')\r\n crf = CRF(self._params.num_labels, sparse_target=False,\r\n learn_mode='marginal', test_mode='marginal', name='crf_out')\r\n loss = crf.loss_function\r\n pred = crf(z)\r\n else:\r\n loss = 'categorical_crossentropy'\r\n pred = Dense(self._params.num_labels, activation='softmax', name='softmax_out')(z)\r\n\r\n model = Model(inputs=inputs, outputs=pred)\r\n model.summary(print_fn=lambda x: logging.info(x + '\\n'))\r\n model.compile(loss=loss, optimizer=self._params.optimizer)\r\n\r\n self.model = model", "def fit_transform(self, X):\n self.fit(X)\n return self.embedding_", "def get_text_embeds(self, text):\n\n # tokenize the text\n text_input = self.tokenizer(text,\n padding='max_length',\n max_length=tokenizer.model_max_length,\n truncation=True,\n return_tensors='pt')\n # embed the text\n with torch.no_grad():\n text_embeds = self.text_encoder(text_input.input_ids.to(self.device))[0]\n return text_embeds", "def extract_embeddings(ds, config):\n from lidbox.models.keras_utils import KerasWrapper\n\n extractors = [(KerasWrapper.from_config_as_embedding_extractor_fn(e), _get_device_or_default(e))\n for e in config[\"extractors\"]]\n # ConcreteFunctions will be pretty-formatted starting from TF 2.3\n # https://www.tensorflow.org/guide/concrete_function#changes_for_tensorflow_23\n logger.info(\"Using %d extractors:\\n %s\",\n len(extractors),\n '\\n '.join(\"on device '{:s}':\\n {}\".format(d, _left_pad_lines(str(e), 2)) for e, d in extractors))\n\n def _append_embeddings(x):\n embeddings = []\n for extractor_fn, device in extractors:\n with tf.device(device):\n embeddings.append(extractor_fn(x[\"input\"]))\n return dict(x, embedding=tf.concat(embeddings, axis=1))\n\n batch_size = tf.constant(config.get(\"batch_size\", 1), tf.int64)\n logger.info(\"Batching inputs with batch size %s, extracting embeddings in batches.\", batch_size.numpy())\n ds = 
(ds.batch(batch_size)\n .prefetch(TF_AUTOTUNE)\n .map(_append_embeddings, num_parallel_calls=TF_AUTOTUNE))\n\n if not config.get(\"no_unbatch\", False):\n logger.info(\"Unbatching after embedding extraction\")\n ds = ds.unbatch()\n\n return ds", "def embedding(self, seqs):\n batch_size, seqlen = seqs.shape\n seqs = np.reshape(seqs, (-1)) # convert to 1-d indexes [(batch_sz*seqlen)]\n embs = self.word2vec[seqs] # lookup [(batch_sz*seqlen) x emb_sz]\n embs = np.reshape(embs, (batch_size, seqlen, -1)) # recover the shape [batch_sz x seqlen x emb_sz]\n return embs", "def build_model(self, input_shape, design_embedding, **kwargs):\n return design_embedding(input_shape)", "def load_pretrain_embedding(vocab, embed_size, embedding_path):\n model = KeyedVectors.load_word2vec_format(embedding_path)\n\n print('{} {}'.format(vocab.size(), embed_size))\n for token, id in vocab.token2id.items():\n if token in model:\n print('{} {}'.format(token, ' '.join(map(str, model[token]))))\n else:\n emb = np.random.random((embed_size,)) - 0.5\n print('{} {}'.format(token, ' '.join(map(str, emb))))", "def extract_features(self,\n embedded_tokens,\n encoder_out=None,\n encoder_padding_mask=None,\n **unused):\n # embed positions\n x = embedded_tokens\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n future_mask = self.buffered_future_mask(x)\n # decoder layers\n for layer in self.layers:\n x = layer(\n x,\n encoder_out=encoder_out,\n encoder_padding_mask=encoder_padding_mask,\n future_mask=future_mask,\n )\n x = self.layer_norm(x)\n return x", "def anime_embedding_model(anime_index, tag_index, embedding_size=50, classification=False):\n\n # Both inputs are 1-dimensional\n anime = Input(name='anime', shape=[1])\n tag = Input(name='tag', shape=[1])\n\n # Embedding the anime (shape will be (None, 1, 50))\n anime_embedding = Embedding(name='anime_embedding',\n input_dim=len(anime_index),\n output_dim=embedding_size)(anime)\n\n # Embedding the tag (shape will be (None, 1, 50))\n tag_embedding = Embedding(name='tag_embedding',\n input_dim=len(tag_index),\n output_dim=embedding_size)(tag)\n\n # Merge the layers with a dot product along the second axis (shape will be (None, 1, 1))\n merged = Dot(name='dot_product', normalize=True, axes=2)([anime_embedding, tag_embedding])\n\n # Reshape to be a single number (shape will be (None, 1))\n merged = Reshape(target_shape=[1])(merged)\n\n # If classifcation, add extra layer and loss function is binary cross entropy\n if classification:\n merged = Dense(1, activation='sigmoid')(merged)\n model = Model(inputs=[anime, tag], outputs=merged)\n model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n # Otherwise loss function is mean squared error\n else:\n model = Model(inputs=[anime, tag], outputs=merged)\n model.compile(optimizer='Adam', loss='mse')\n\n return model", "def __init__(self, voc_size=8000, embed_size=100, hid_size=100, trunc=4,\n model=None):\n\n self.log = logging.getLogger(\"TEST.Embed\")\n self.log.setLevel(logging.INFO)\n\n self.unknown_token = \"UNKNOWN_TOKEN\"\n self.sentence_start_token = \"SENTENCE_START\"\n self.sentence_end_token = \"SENTENCE_END\"\n\n if model is None:\n self.log.info(\"Initializing RNN parameters and functions...\")\n\n self.vocabulary_size = voc_size\n self.embed_size = embed_size\n self.hidden_size = hid_size\n self.bptt_truncate = trunc\n\n # Instantiate the network weights\n # I feel like the first and third are switched for some reason...\n # but it's pretty consistent in the example code. 
Perhaps it's\n # backwards for a purpose\n # The weights going from the input layer to the word embedding\n # layer (E, in tutorial)\n weights_ie = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (embed_size, voc_size))\n\n # The weights going from input layer to hidden layer\n # (U, in tutorial)\n weights_eh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, embed_size))\n\n # The weights going from hidden layer to hidden layer\n # (W, in tutorial)\n weights_hh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, hid_size))\n\n # The weights going from hidden layer to output layer\n # (V, in tutorial)\n weights_ho = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (voc_size, hid_size))\n\n # The bias for the hidden units (no bias applied to embedding layer)\n bias = np.zeros((3, hid_size))\n\n # The bias for the output units\n out_bias = np.zeros(voc_size)\n\n self.weights_ie = theano.shared(\n name='weights_ie',\n value=weights_ie.astype(theano.config.floatX))\n\n self.weights_eh = theano.shared(\n name='weights_eh',\n value=weights_eh.astype(theano.config.floatX))\n\n self.weights_hh = theano.shared(\n name='weights_hh',\n value=weights_hh.astype(theano.config.floatX))\n\n self.weights_ho = theano.shared(\n name='weights_ho',\n value=weights_ho.astype(theano.config.floatX))\n\n self.bias = theano.shared(\n name='bias',\n value=bias.astype(theano.config.floatX))\n\n self.out_bias = theano.shared(\n name='out_bias',\n value=out_bias.astype(theano.config.floatX))\n\n self.cache_ie = theano.shared(\n name='cache_ie',\n value=np.zeros(weights_ie.shape).astype(theano.config.floatX))\n\n self.cache_eh = theano.shared(\n name='cache_eh',\n value=np.zeros(weights_eh.shape).astype(theano.config.floatX))\n\n self.cache_hh = theano.shared(\n name='cache_hh',\n value=np.zeros(weights_hh.shape).astype(theano.config.floatX))\n\n self.cache_ho = theano.shared(\n name='cache_ho',\n value=np.zeros(weights_ho.shape).astype(theano.config.floatX))\n\n self.cache_bias = theano.shared(\n name='cache_bias',\n value=np.zeros(bias.shape).astype(theano.config.floatX))\n\n self.cache_out_bias = theano.shared(\n name='cache_out_bias',\n value=np.zeros(out_bias.shape).astype(theano.config.floatX))\n\n self.vocabulary = []\n self.word_to_index = {}\n self.index_to_word = []\n else:\n self.log.info(\"Loading model parameters from saved model...\")\n\n with open(model, \"rb\") as modelFile:\n params = cPickle.load(modelFile)\n\n self.vocabulary_size = params[0]\n self.embed_size = params[1]\n self.hidden_size = params[2]\n self.bptt_truncate = params[3]\n\n self.weights_ie = params[4]\n self.weights_eh = params[5]\n self.weights_hh = params[6]\n self.weights_ho = params[7]\n\n self.vocabulary = params[8]\n if not self.vocabulary[-1] == self.unknown_token:\n self.log.info(\"Appending unknown token\")\n self.vocabulary[-1] = self.unknown_token\n self.index_to_word = params[9]\n self.word_to_index = params[10]\n\n self.bias = params[11]\n self.out_bias = params[12]\n\n self.cache_ie = params[13]\n self.cache_eh = params[14]\n self.cache_hh = params[15]\n self.cache_ho = params[16]\n self.cache_bias = params[17]\n self.cache_out_bias = params[18]\n # End of if statement\n\n # Symbolic representation of one input sentence\n input = T.ivector('sentence')\n\n # Symbolic representation of the one output sentence\n output = T.ivector('sentence')\n\n # Symbolic representation of the cache decay for RMSprop\n decay = 
T.scalar('decay')\n\n # Stochastic Gradient Descent step\n learning_rate = T.scalar('learning_rate')\n\n def forward_propagate(word, previous_state):\n \"\"\"\n Vertically propagates one of the words.\n\n :type word: int\n :param word: the index of the current input word\n\n :type previous_state: T.dvector()\n :param word: the output of the hidden layer from the previous\n horizontal layer\n \"\"\"\n # Embedding layer\n word_vector = self.weights_ie[:, word]\n\n # GRU layer\n update_gate = T.nnet.hard_sigmoid(\n self.weights_eh[0].dot(word_vector) +\n self.weights_hh[0].dot(previous_state) +\n self.bias[0]\n )\n\n reset_gate = T.nnet.hard_sigmoid(\n self.weights_eh[1].dot(word_vector) +\n self.weights_hh[1].dot(previous_state) +\n self.bias[1]\n )\n\n hypothesis = T.tanh(\n self.weights_eh[2].dot(word_vector) +\n self.weights_hh[2].dot(previous_state * reset_gate) +\n self.bias[2]\n )\n\n current_state = (T.ones_like(update_gate) - update_gate) * hypothesis + update_gate * previous_state\n\n # Output layer\n current_output = T.nnet.softmax(\n self.weights_ho.dot(current_state) + self.out_bias\n )[0]\n\n # Not sure why current_output[0] and not just current_output...\n return [current_output, current_state]\n\n #######################################################################\n # Symbolically represents going through each input sentence word and\n # then calculating the state of the hidden layer and output word for\n # each word. The forward_propagate function is the one used to\n # generate the output word and hidden layer state.\n #######################################################################\n self.theano = {}\n\n [out, state], updates = theano.scan(\n forward_propagate,\n sequences=input,\n truncate_gradient=self.bptt_truncate,\n outputs_info=[None, dict(initial=T.zeros(self.hidden_size))],\n name=\"forward_propagate\"\n )\n\n # Predicts the output words for each word in the sentence\n prediction = T.argmax(out, axis=1)\n\n # Calculates the output error between the predicted output and the\n # actual output\n out_error = T.sum(T.nnet.categorical_crossentropy(out, output))\n\n # Symbolically represents gradient calculations for gradient descent\n d_weights_ie = T.grad(out_error, self.weights_ie)\n d_weights_eh = T.grad(out_error, self.weights_eh)\n d_weights_hh = T.grad(out_error, self.weights_hh)\n d_weights_ho = T.grad(out_error, self.weights_ho)\n d_bias = T.grad(out_error, self.bias)\n d_out_bias = T.grad(out_error, self.out_bias)\n\n # Symbolic theano functions\n self.forward_propagate = theano.function([input], out,\n name=\"forward_propagate\")\n self.predict = theano.function([input], prediction, name=\"predict\")\n self.calculate_error = theano.function([input, output], out_error,\n name=\"calculate_error\")\n self.bptt = theano.function([input, output],\n [d_weights_ie, d_weights_eh, d_weights_hh, d_weights_ho, d_bias,\n d_out_bias],\n name=\"bptt\")\n\n # RMSprop parameters\n cache_ie = (decay * self.cache_ie) + ((1 - decay) * d_weights_ie ** 2)\n cache_eh = (decay * self.cache_eh) + ((1 - decay) * d_weights_eh ** 2)\n cache_hh = (decay * self.cache_hh) + ((1 - decay) * d_weights_hh ** 2)\n cache_ho = (decay * self.cache_ho) + ((1 - decay) * d_weights_ho ** 2)\n cache_bias = (decay * self.cache_bias) + ((1 - decay) * d_bias ** 2)\n cache_out_bias = (decay * self.cache_out_bias) + ((1 - decay) * d_out_bias ** 2)\n eps = 1e-6 # Prevents division by 0\n\n self.sgd_step = theano.function(\n [input, output, learning_rate, theano.In(decay, value=0.9)],\n [],\n 
updates=[\n (self.weights_ie, self.weights_ie - learning_rate *\n d_weights_ie / (T.sqrt(self.cache_ie + eps))),\n (self.weights_eh, self.weights_eh - learning_rate *\n d_weights_eh / (T.sqrt(self.cache_eh + eps))),\n (self.weights_hh, self.weights_hh - learning_rate *\n d_weights_hh / (T.sqrt(self.cache_hh + eps))),\n (self.weights_ho, self.weights_ho - learning_rate *\n d_weights_ho / (T.sqrt(self.cache_ho + eps))),\n (self.bias, self.bias - learning_rate * d_bias /\n (T.sqrt(self.cache_bias + eps))),\n (self.out_bias, self.out_bias - learning_rate *\n d_out_bias / (T.sqrt(self.cache_out_bias + eps))),\n (self.cache_ie, cache_ie),\n (self.cache_eh, cache_eh),\n (self.cache_hh, cache_hh),\n (self.cache_ho, cache_ho),\n (self.cache_bias, cache_bias),\n (self.cache_out_bias, cache_out_bias)]\n )\n\n self.x_train = None\n self.y_train = None", "def _generate_embeddings(self, config): \n tr_parts = []\n te_parts = []\n all_columns = []\n for comp in self.components:\n tr_tmp, te_tmp, cols = comp.generate(config)\n if cols != None:\n print(tr_tmp.shape,te_tmp.shape)\n tr_parts.append(tr_tmp)\n te_parts.append(te_tmp)\n all_columns += cols\n X_train = np.concatenate(tr_parts, axis=1)\n X_test = np.concatenate(te_parts, axis=1)\n print(\"Concatenated size:\", X_train.shape, X_test.shape)\n self.feature_columns = all_columns\n return X_train, X_test", "def TransformerTokenEmbedding(\n num_embeddings, embedding_dim, padding_idx, freeze_embed=False\n):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n nn.init.constant_(m.weight[padding_idx], 0)\n if freeze_embed:\n m.weight.requires_grad = False\n return m", "def source_embedding_fairseq(self):\r\n return tf.get_variable(\r\n name=\"W\",\r\n shape=[self.params[\"feature.dim\"], self.params[\"embedding.dim\"]],\r\n initializer=tf.random_normal_initializer(\r\n mean=0.0,\r\n stddev=0.1))", "def modify_to_return_embeddings(net, model_name):\n if model_name in [\"vgg_face_dag\", \"vgg_m_face_bn_dag\"]:\n net.fc8 = torch.nn.Sequential()\n else:\n msg = \"{} not yet supported\".format(model_name)\n raise NotImplementedError(msg)\n return net" ]
[ "0.7617932", "0.7358305", "0.7260656", "0.70529824", "0.70419127", "0.6889344", "0.6849663", "0.681078", "0.6796582", "0.67891294", "0.6745876", "0.66203314", "0.6616796", "0.659487", "0.6581204", "0.65696263", "0.6557681", "0.6531176", "0.649195", "0.6473366", "0.6431787", "0.64287907", "0.6404737", "0.63940465", "0.6381103", "0.63769853", "0.63761353", "0.637343", "0.634846", "0.63394153", "0.63342136", "0.6332294", "0.63285875", "0.632785", "0.6310781", "0.63019454", "0.62952036", "0.6286008", "0.627403", "0.62561256", "0.625379", "0.623676", "0.6234598", "0.6225723", "0.6170976", "0.61586684", "0.61559117", "0.6154044", "0.6147923", "0.6141838", "0.6131576", "0.6124339", "0.60991114", "0.60855836", "0.6074126", "0.60732317", "0.60673785", "0.6050661", "0.6038956", "0.60345006", "0.6027416", "0.602646", "0.60241175", "0.60215753", "0.6019841", "0.60176605", "0.60108614", "0.60081756", "0.59963274", "0.5987672", "0.5983934", "0.5976071", "0.59566975", "0.5953358", "0.5941854", "0.593935", "0.59267056", "0.59202003", "0.591798", "0.5915428", "0.5914855", "0.59147626", "0.59083915", "0.590299", "0.58984184", "0.58911926", "0.5873182", "0.58701366", "0.5850499", "0.5849354", "0.58492833", "0.58391714", "0.5834864", "0.58200806", "0.5814631", "0.5811882", "0.5808449", "0.5808325", "0.5797832", "0.57903594" ]
0.77565044
0
Get a logger that produces reasonable output.
def _get_logger():
    logger = logging.getLogger(__name__)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(logging.Formatter("%(asctime)s [%(levelname)8s] %(message)s"))
    logger.addHandler(ch)
    logger.setLevel(logging.DEBUG)
    return logger
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_logger(name=\"unknown_logger\"):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(FORMATTER)\n logger.addHandler(handler)\n logger.propagate = False # to avoid printing the same logs multiple times\n return logger", "def _get_logger():\n return logging.Logger(__name__)", "def get_logger():\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter(fmt=\"%(asctime)s %(levelname)s %(name)s: %(message)s\",\n datefmt=\"%Y-%m-%d - %H:%M:%S\")\n if logger.hasHandlers():\n logger.handlers.clear()\n\n console = logging.StreamHandler(sys.stdout)\n console.setLevel(logging.INFO)\n console.setFormatter(formatter)\n\n logger.addHandler(console)\n\n return logger", "def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.propagate = False\n logger.setLevel(logging.DEBUG)\n if not logger.handlers:\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(\"[%(asctime)s] %(message)s\"))\n logger.addHandler(handler)\n return logger", "def get_logger(logger_name='default'):\n log = logging.getLogger(logger_name)\n log.setLevel(logging.DEBUG)\n log_format = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(log_format)\n if log.hasHandlers():\n log.handlers.clear()\n log.addHandler(ch)\n\n return log", "def get_logger():\r\n global logger\r\n \r\n if logger:\r\n return logger\r\n else:\r\n return create_logger()", "def get_logger(name):\n log = logging.getLogger(name)\n # we don't set the logger's level to inherit from the parent logger.\n if log.handlers:\n return log\n fmt = logging.Formatter(LOG_FMT)\n shdlr = logging.StreamHandler()\n shdlr.setFormatter(fmt)\n log.addHandler(shdlr)\n log.propagate = False\n return log", "def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.addHandler(logging.StreamHandler(sys.stdout))\n logger.setLevel(logging.DEBUG)\n return logger", "def get_logger(name='default.log', level=logging.DEBUG):\n logger = logging.getLogger(name)\n logger.setLevel(level)\n hdlr = logging.StreamHandler()\n hdlr.setLevel(level)\n fmt = PrettyFormatter()\n hdlr.setFormatter(fmt)\n logger.addHandler(hdlr)\n return logger", "def get_logger(name):\n logger = logging.getLogger(name)\n if not logger.handlers:\n logger.propagate = 1 # propagate to parent\n console = logging.StreamHandler()\n logger.addHandler(console)\n formatter = logging.Formatter(\n '%(name)s - [%(levelname)s] - %(message)s')\n console.setFormatter(formatter)\n return logger", "def get_logger(log_name: str) -> logging.Logger:\n logger = logging.getLogger(log_name)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('%(asctime)s - %(name)s: %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def logger() -> logging.Logger:\n return logging.getLogger(__name__)", "def get_logger():\n logging.config.dictConfig(LOGGING_APPLICATION_CONF)\n logger = logging.getLogger(__name__)\n\n if not logger.handlers:\n logger.setLevel(logging.DEBUG)\n console_handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"%(asctime)s— %(levelname)s —\\\n %(funcName)s:%(lineno)d — %(message)s\")\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n return logger", "def get_logger():\n return 
logging.getLogger(__name__)", "def logger():\n return logging.getLogger(__name__)", "def get_logger(name):\n\n logger = logging.getLogger(name)\n if not logger.handlers:\n out = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n fmt='%(asctime)s - %(name)s - %(levelname)s \\\n - %(module)s - %(message)s'\n )\n out.setFormatter(formatter)\n logger.addHandler(out)\n logger.setLevel(get_config('LOGGING_LEVEL'))\n logger.propagate = False\n return logger", "def _get_logger(verbose: bool = False) -> logging:\n logger = logging.getLogger() # root logger\n if verbose:\n logger.setLevel(logging.DEBUG)\n format_str = '%(asctime)s - %(levelname)-8s - %(module)s:%(funcName)-20s - %(message)s'\n else:\n logger.setLevel(logging.INFO)\n format_str = '%(message)s'\n\n date_format = '%Y-%m-%d %H:%M:%S'\n if os.isatty(2):\n color_format = '%(log_color)s' + format_str\n colors = {'DEBUG': 'green',\n 'INFO': 'reset',\n 'WARNING': 'bold_yellow',\n 'ERROR': 'bold_red',\n 'CRITICAL': 'bold_red'}\n formatter = colorlog.ColoredFormatter(color_format, date_format, log_colors=colors)\n else:\n formatter = logging.Formatter(format_str, date_format)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n return logging.getLogger(__name__)", "def get_logger(name: str):\n # setup logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger", "def get_logger(name):\n #### Configure Logger ####\n # Log to stdout\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(asctime)s - %(message)s',\n '%m/%d/%Y %H:%M:%S')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n return logger", "def get_logger(set_info=False):\n\n logging.basicConfig(format=\"%(message)s\", stream=sys.stdout)\n logger = logging.getLogger(\"pythonanywhere\")\n if set_info:\n logger.setLevel(logging.INFO)\n else:\n logger.setLevel(logging.WARNING)\n return logger", "def get_logger(name=\"LazySusan\"):\n level = get_level()\n _configure(level)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n\n return logger", "def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(__lvl__)\n ch = logging.StreamHandler()\n ch.setLevel(__lvl__)\n preformat = f'[{logger.name}]'\n # [%(threadName)s/%(levelname)s] = [MainThread/INFO]\n ch.setFormatter(logging.Formatter(fmt=preformat + ' %(levelname)s [%(asctime)s] %(message)s',\n datefmt='%H:%M:%S'))\n logger.addHandler(ch)\n return logger", "def _get_logger(self):\n return Logger(\"SLOTH\")", "def create_logger():\r\n global logger\r\n logger = logging.getLogger(logger_name)\r\n\r\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')\r\n \r\n handler = logging.StreamHandler()\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n \r\n return logger", "def get_logger(level=logging.INFO, quite=False, debug=False, to_file=''):\n assert level in [logging.DEBUG, logging.INFO, logging.WARNING, logging.CRITICAL]\n logger = logging.getLogger('main')\n formatter = logging.Formatter('%(asctime)s - %(funcName)s - %(levelname)s - %(message)s')\n if debug:\n level = logging.DEBUG\n logger.setLevel(level=level)\n 
if not quite:\n if to_file:\n fh = logging.FileHandler(to_file)\n fh.setLevel(level=level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n else:\n ch = logging.StreamHandler()\n ch.setLevel(level=level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger", "def _get_logger(title, verbose_lvl):\n\n logger = logging.getLogger(title)\n console = logging.StreamHandler()\n\n if verbose_lvl == 1:\n logger.setLevel(logging.INFO)\n console.setLevel(logging.INFO)\n elif verbose_lvl == 2:\n logger.setLevel(logging.DEBUG)\n console.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.WARNING)\n console.setLevel(logging.WARNING)\n\n fmt = logging.Formatter(\n '%(asctime)s %(name)s %(levelname)s %(message)s')\n console.setFormatter(fmt)\n logger.addHandler(console)\n\n return logger", "def get_console_logger(name=None):\n if name is None:\n name = os.path.splitext(os.path.basename(sys.argv[0]))[0]\n logger = logging.getLogger(name)\n\n # reset handlers\n logger.handlers = []\n sh = logging.StreamHandler()\n fmt = logging.Formatter(LOG_FMT)\n sh.setFormatter(fmt)\n logger.addHandler(sh)\n logger.setLevel(logging.INFO)\n\n return logger", "def _logger(self):\n logger = logging.getLogger(self.NAME)\n logger.setLevel(self.LOG_LEVEL)\n shandler = logging.StreamHandler(sys.stdout)\n fmt = '\\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'\n fmt += '%(lineno)d %(asctime)s\\033[0m| %(message)s'\n shandler.setFormatter(logging.Formatter(fmt))\n logger.addHandler(shandler)\n return logger", "def logger(self, name):\n logger, _ = get_stdout_logger(name, verbosity=self.verbosity)\n return logger", "def get_logger():\n # Prepare log directory.\n try:\n os.mkdir('logs')\n except FileExistsError:\n pass\n\n # Create logger and formatter.\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s %(message)s')\n\n # Create and attach stream handler.\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n # Create and attach file handler.\n file_handler = logging.handlers.TimedRotatingFileHandler(\n 'logs/log.txt', when='d', encoding='utf-8')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n return logger", "def get_logger(name):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n\n # Console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n\n logger.addHandler(ch)\n\n return logger", "def get_logger(self, verbose):\n log_levels = [logging.INFO, logging.DEBUG]\n\n log = logging.getLogger()\n log.setLevel(log_levels[int(verbose)])\n \n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(log_levels[int(verbose)])\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')\n ch.setFormatter(formatter)\n log.addHandler(ch)\n\n return log", "def _logger(self) -> logging.Logger:\n return logging.getLogger(\n type(self).__name__\n )", "def get_main_logger():\n\n # Use verbose debug logging for now.\n console_loglevel = VERBOSITY_LEVELS[2]\n file_loglevel = VERBOSITY_LEVELS[2]\n\n console_fmt = logging.Formatter(\n '%(name)s: %(levelname)s %(message)s')\n file_fmt = logging.Formatter(\n '%(asctime)s - %(name)s: %(levelname)s %(message)s')\n\n log = logging.getLogger('toggledarkly')\n\n console_log = logging.StreamHandler()\n 
console_log.setFormatter(console_fmt)\n console_log.setLevel(console_loglevel)\n log.addHandler(console_log)\n\n file_log = handlers.RotatingFileHandler(\n LOG_FILE_PATH, maxBytes=(1048576*5), backupCount=5\n )\n file_log.setFormatter(file_fmt)\n file_log.setLevel(file_loglevel)\n log.addHandler(file_log)\n\n if SYSTEMD_SUPPORT:\n journald_log = JournalHandler()\n journald_log.setLevel(file_loglevel)\n journald_log.setFormatter(console_fmt)\n log.addHandler(journald_log)\n \n log.setLevel(VERBOSITY_LEVELS[2])\n\n return log", "def logger(self) -> logging.Logger:\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s - %(name)-15s - [%(levelname)-10s] %(message)s\"\n )\n return logging.getLogger(os.path.basename(__file__))", "def get_logger(\n facility=None,\n level='warning',\n name=None,\n logfmt='%(name)s[%(process)d] %(levelname).1s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S ',\n child=None\n):\n\n dc(f\"facility={facility}, level={level}, name={name!r}, logfmt={logfmt!r}, datefmt={datefmt!r}, child={child!r}\")\n\n # If no name is provided, use the name of the current program (minus\n # any file extension).\n if not name:\n name=os.path.basename(sys.argv[0]).rsplit('.',1)[0]\n\n if facility is None:\n if child:\n # Initialize this log as a child of the main logger.\n dc(f\"Setting up child logger {child!r}.\")\n log=logging.getLogger().getChild(child)\n else:\n # Assume this process has already set up a logger (or just wants to\n # use the default logger), and return that.\n dc(f\"No facility, so getting root logger.\")\n log=logging.getLogger()\n if not log.handlers:\n # Suppress \"No handlers could be found ...\" message, in case our\n # root logger hasn't been set up. NullHandler is a bit bucket.\n log.addHandler(logging.NullHandler)\n if name:\n log.name=name\n dc(f\"Returning with logger {log!r}\")\n return log\n\n if not child:\n # Child loggers use the parent logger's facility, handler, and\n # formatting.\n h=None\n if isinstance(facility,logging.Handler):\n dc(f\"facility is logging.Handler {facility!r}\")\n # The caller has provided a handler for us.\n h=facility\n if isinstance(h,logging.StreamHandler):\n # Prefix our log format with the date and time.\n if 'asctime' in logfmt:\n logfmt='%(asctime)s '+logfmt\n f=logging.Formatter(logfmt,datefmt=datefmt)\n else:\n if isinstance(facility,str):\n dc(f\"facility is string {facility!r}\")\n if facility in syslog_facilities:\n # It looks like we're logging to syslog.\n facility=logging.handlers.SysLogHandler.facility_names[facility]\n else:\n # This string must be a filename, so open it for appending.\n dc(f\"Treating facility={facility!r} as a filename.\")\n facility=os.path.expanduser(os.path.expandvars(facility))\n dc(f\"Expanded filename is {facility!r}.\")\n if os.path.isfile(facility):\n mode='a'\n elif not os.path.exists(facility):\n mode='w'\n else:\n raise ValueError('\"%s\" exists but is not a regular file.'%(facility,))\n facility=open(facility,mode)\n\n if isinstance(facility,int):\n dc(f\"facility is integer {facility!r}\")\n # This is a syslog facility number, or had better be.\n system=platform.system()\n if system=='Darwin':\n h=logging.handlers.SysLogHandler(address='/var/run/syslog',facility=facility)\n elif system=='Linux':\n h=logging.handlers.SysLogHandler(address='/dev/log',facility=facility)\n else:\n dc(f\"Createing SysLogHandler for this logger.\")\n h=logging.handlers.SysLogHandler(\n address=('localhost',logging.handlers.SYSLOG_UDP_PORT),\n facility=facility\n )\n dc(f\"Createing 
logging.Formatter from logfmt={logfmt!r}\")\n f=logging.Formatter(logfmt)\n elif isinstance(facility,IOBase):\n dc(f\"facility is {facility!r}\")\n # This is a stream, so add date and time to the start of our log format.\n h=logging.StreamHandler(facility)\n logfmt='%(asctime)s'+logfmt\n dc(f\"Createing logging.Formatter from logfmt={logfmt!r}, datefmt={datefmt!r}\")\n f=logging.Formatter(logfmt,datefmt=datefmt)\n else:\n raise ValueError('bad log facility value: %r'%(facility,))\n\n if isinstance(level,str):\n # If level is a string, make sure it is upper case.\n level=level.upper()\n dc(f\"level is string {level!r}\")\n elif isinstance(level,int) and level in _nameToLevel:\n dc(f\"level is int {level!r}\")\n level=_nameToLevel[level]\n dc(f\"converted level is int {level!r}\")\n else:\n raise ValueError('bad log level value: %r'%(level,))\n\n # Now create the new logger, and return it to the caller.\n if not child:\n dc(f\"Applying formatter {f!r} to handler {h!r}\")\n h.setFormatter(f)\n log=logging.getLogger(name)\n dc(f\"Adding handler to logger\")\n log.addHandler(h)\n l=_nameToLevel[level]\n dc(f\"_nameToLevel[{level!r}]{_nameToLevel[level]!r}\")\n log.setLevel(_nameToLevel[level])\n dc(f\"Returning with logger {log!r}\")\n return log", "def get_logger():\n logger = logging.getLogger('sedbg')\n if not logger.handlers:\n sh = logging.StreamHandler()\n logger.addHandler(sh)\n logger.setLevel(logging.INFO)\n\n return logger", "def get_logger(self, name=\"amulet-logger\", level=logging.DEBUG):\n log = logging\n logger = log.getLogger(name)\n fmt = log.Formatter(\"%(asctime)s %(funcName)s \"\n \"%(levelname)s: %(message)s\")\n\n handler = log.StreamHandler(stream=sys.stdout)\n handler.setLevel(level)\n handler.setFormatter(fmt)\n\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def get_logger(level: Optional[int] = None) -> logging.Logger:\n logger = logging.getLogger(LOGGER_NAME)\n if level is not None:\n logger.setLevel(level)\n\n if not logger.handlers:\n formatter = logging.Formatter(fmt=\"%(levelname)-8s %(message)s\", datefmt=\"%H:%M:%S\")\n handler = logging.StreamHandler()\n if level is not None:\n handler.setLevel(level)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n return logger", "def get_logger(logger_name):\n logger = logging.getLogger(logger_name)\n\n logger.setLevel(app_config.LOG_LEVEL)\n logger.addHandler(get_console_handler())\n\n # with this pattern, it's rarely necessary to propagate\n # the error up to parent\n logger.propagate = False\n\n return logger", "def get_logger(verbose, debug):\n if debug:\n level = logging.DEBUG\n elif verbose:\n level = logging.INFO\n else:\n level = logging.WARNING\n\n # hahahahaha\n class UtcFormatter(logging.Formatter):\n converter = time.gmtime\n\n formatter = UtcFormatter(\"%(message)s\" if not verbose else \"%(asctime)s [%(levelname)-5s] %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S%z\")\n\n handler = logging.StreamHandler()\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n\n logger = logging.Logger('s3cfdeploy')\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def get_logger():\n logging.basicConfig(\n level=logging.DEBUG,\n format='[%(name)s] [%(asctime)s]: %(message)s')\n caller = whoami(offset=1)\n name = os.path.basename(caller)\n logger = logging.getLogger(name)\n return logger", "def get_logger(*, logger_name):\n\n logger = logging.getLogger(logger_name)\n\n logger.setLevel(logging.INFO)\n\n logger.addHandler(get_console_handler())\n 
logger.addHandler(get_file_handler())\n logger.propagate = False\n\n return logger", "def get_logger(level):\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"[%(name)s|%(asctime)s] %(message)s\")\n ch.setFormatter(formatter)\n\n logger = logging.getLogger(__name__)\n if not logger.handlers:\n logger.setLevel(level)\n logger.addHandler(ch)\n return logger", "def get_logger(logger_name):\n logger = logging.getLogger(logger_name)\n logger.setLevel(LOGGING_LEVEL)\n logger.addHandler(_handler_file())\n logger.addHandler(_handler_stdout())\n logger.propagate = False\n return logger", "def get_logger(name):\n return logging.getLogger(name)", "def get_standard_logger():\n standard_logger = logging.getLogger(\"instana\")\n\n ch = logging.StreamHandler()\n f = logging.Formatter('%(asctime)s: %(process)d %(levelname)s %(name)s: %(message)s')\n ch.setFormatter(f)\n standard_logger.addHandler(ch)\n standard_logger.setLevel(logging.DEBUG)\n return standard_logger", "def get_logger(name: str) -> logging.Logger:\n try:\n p = Path(name)\n if p.exists():\n name = str(p.absolute().relative_to(Path.cwd()).as_posix())\n except:\n pass\n logger = logging.getLogger(name)\n # logger.addHandler(TqdmLoggingHandler())\n return logger", "def get_logger(name):\n logger = logging.getLogger(name)\n # clear handlers if they were created in other runs\n if (logger.hasHandlers()):\n logger.handlers.clear()\n logger.setLevel(logging.DEBUG)\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n # create console handler add add to logger\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n # create file handler add add to logger when name is not None\n if name is not None:\n fh = logging.FileHandler(f'{name}.log')\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n return logger", "def create_logger() -> logging.Logger:\n pass # TODO: Replace with implementation!", "def get_logger(name):\n logger = logging.getLogger(name)\n # clear handlers if they were created in other runs\n if (logger.hasHandlers()):\n logger.handlers.clear()\n logger.setLevel(logging.DEBUG)\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n # create console handler add add to logger\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n # create file handler add add to logger when name is not None\n if name is not None:\n fh = logging.FileHandler(f'GNN-{name}.log')\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n return logger", "def get_logger(name=None):\n return logging.getLogger(\"bids-schema\" + (\".%s\" % name if name else \"\"))", "def getLogger(output_dir: Optional[Path] = None, *, root: bool = False, name: str = \"\") -> Logger:\n if root:\n global _root\n _root = name if name else Path(previousframe(2).filename).stem\n logger = gL(__package__)\n for hndl in list(logger.handlers):\n logger.removeHandler(hndl)\n logger.setLevel(DEBUG)\n logger.addHandler(_MakeHandler(StreamHandler, min_level=INFO, max_level=INFO, stream=sys.stdout))\n logger.addHandler(_MakeHandler(StreamHandler, min_level=WARNING, stream=sys.stderr))\n if output_dir:\n log_dir = (output_dir / \"log\").mkdir_hidden()\n log_file = log_dir / f\"{output_dir.resolve().name}.log\"\n logger.addHandler(_MakeHandler(FileHandler, filename=log_file, min_level=DEBUG, encoding=\"utf-8\"))\n return 
logger", "def logger(self):\n return logging", "def get_logger(name: str, log_path: str = os.path.join(os.path.dirname(__file__), \"main.log\"),\n console: bool = False) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n # ensure that logging handlers are not duplicated\n for handler in list(logger.handlers):\n logger.removeHandler(handler)\n\n # rotating file handler\n if log_path:\n fh = RotatingFileHandler(path_join(log_path),\n maxBytes=10 * 2 ** 20, # 10 MB\n backupCount=1) # 1 backup\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # console handler\n if console:\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n # null handler\n if not (log_path or console):\n logger.addHandler(logging.NullHandler())\n\n return logger", "def setup_logger():\n logger = logging.getLogger(\"extract_brass_bedpe\")\n LoggerFormat = '[%(levelname)s] [%(asctime)s] [%(name)s] - %(message)s'\n logger.setLevel(level=logging.INFO)\n handler = logging.StreamHandler(sys.stderr)\n formatter = logging.Formatter(LoggerFormat, datefmt='%Y%m%d %H:%M:%S')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def get_logger(log_dir, name):\n class StreamHandlerWithTQDM(logging.Handler):\n \"\"\"Let `logging` print without breaking `tqdm` progress bars.\n See Also:\n > https://stackoverflow.com/questions/38543506\n \"\"\"\n def emit(self, record):\n try:\n msg = self.format(record)\n tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\n # Create logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # Log everything (i.e., DEBUG level and above) to a file\n log_path = os.path.join(log_dir, f'{name}.txt')\n file_handler = logging.FileHandler(log_path)\n file_handler.setLevel(logging.DEBUG)\n\n # Log everything except DEBUG level (i.e., INFO level and above) to console\n console_handler = StreamHandlerWithTQDM()\n console_handler.setLevel(logging.INFO)\n\n # Create format for the logs\n file_formatter = logging.Formatter('[%(asctime)s] %(message)s',\n datefmt='%m.%d.%y %H:%M:%S')\n file_handler.setFormatter(file_formatter)\n console_formatter = logging.Formatter('[%(asctime)s] %(message)s',\n datefmt='%m.%d.%y %H:%M:%S')\n console_handler.setFormatter(console_formatter)\n\n # add the handlers to the logger\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n return logger", "def create_logger():\n log = logging.getLogger() # root logger\n log.setLevel(logging.DEBUG)\n format_str = '%(asctime)s - %(levelname)-8s - %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n if os.isatty(2):\n cformat = '%(log_color)s' + format_str\n colors = {'DEBUG': 'reset',\n 'INFO': 'reset',\n 'WARNING': 'bold_yellow',\n 'ERROR': 'bold_red',\n 'CRITICAL': 'bold_red'}\n formatter = colorlog.ColoredFormatter(cformat, date_format,\n log_colors=colors)\n else:\n formatter = logging.Formatter(format_str, date_format)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n log.addHandler(stream_handler)\n return logging.getLogger(__name__)", "def get_logger(name: str) -> logging.Logger:\n \n return logging.getLogger(name)", "def _setup_galaxy_logger():\n log = logging.getLogger()\n log.setLevel(logging.DEBUG)\n log_out = 
logging.StreamHandler(sys.stdout)\n log_out.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n log_out.setFormatter(formatter)\n log.addHandler(log_out)\n return log", "def get_logger(logger_name='root'):\n return getLogger(logger_name)", "def get_logger(name, fluentd_host='localhost', fluentd_port=24224):\n logger = logging.getLogger(name)\n fluent_handler = handler.FluentHandler(\n 'mole.logs',\n host=fluentd_host,\n port=fluentd_port,\n buffer_overflow_handler=overflow_handler\n )\n formatter = handler.FluentRecordFormatter(\n custom_format,\n format_json=False\n )\n fluent_handler.setFormatter(formatter)\n logger.addHandler(fluent_handler)\n return logger", "def get_logger(name: str, level: str = LOG_LEVEL) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.propagate = False\n logger.setLevel(level)\n coloredlogs.install(\n level=level, logger=logger, fmt='%(asctime)s %(name)s: %(lineno)s %(levelname)s: %(message)s', field_styles=FIELD_STYLES\n )\n return logger", "def _logger():\n return logging.getLogger(module_name)", "def logger(name=None):\r\n\r\n log = logging.getLogger(name or 'logging')\r\n if HANDLER and HANDLER not in log.handlers:\r\n log.addHandler(HANDLER)\r\n\r\n return log", "def get_logger(namespace):\n namespace = __name__ if not namespace else namespace\n\n # create logger with namespace\n logger = logging.getLogger(namespace)\n logger.setLevel(logging.INFO)\n\n # create formatter and add it to the handlers\n formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s')\n\n # create console handler\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n return logger", "def get_logger(name: str) -> logging.Logger:\n\n if name in LOGGER_TABLE:\n return LOGGER_TABLE[name]\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n logger.addHandler(STREAM_HANDLER)\n\n LOGGER_TABLE[name] = logger\n return logger", "def getLogger(name):\n log = logging.getLogger(name)\n log.setLevel(logging.DEBUG)\n hnd2 = logging.StreamHandler(sys.stdout)\n fmt2 = logging.Formatter(fmt='%(name)-20s %(levelname)-8s %(message)s')\n hnd2.setLevel(logging.NOTSET)\n hnd2.addFilter(FilterLevel(True, [logging.INFO]))\n hnd2.setFormatter(fmt2)\n log.addHandler(hnd2)\n hnd1 = logging.StreamHandler(sys.stdout)\n fmt1 = logging.Formatter(fmt=('%(name)-20s %(levelname)-8s' +\n '%(filename)s:%(lineno)s %(message)s'))\n hnd1.setLevel(logging.NOTSET)\n hnd1.addFilter(FilterLevel(False, [logging.INFO]))\n hnd1.setFormatter(fmt1)\n log.addHandler(hnd1)\n return log", "def _get_logger(name=None, level=None):\n\n logger = logging.getLogger(name)\n if level is not None:\n logger.setLevel(level)\n\n return logger", "def get_logger(name=None, level=\"warn\"):\n logger_name = str(uuid.uuid4())[:8] if name is None else name\n logger = logging.getLogger(logger_name)\n level = os.environ.get(\"LOG_LEVEL\", level)\n\n msg_formats = {\n \"debug\": \"%(asctime)s [%(levelname)s] %(message)s [at %(filename)s:%(lineno)d]\",\n \"info\": \"%(asctime)s %(message)s [at %(filename)s:%(lineno)d]\",\n \"warn\": \"%(asctime)s %(message)s\",\n \"warning\": \"%(asctime)s %(message)s\",\n \"error\": \"%(asctime)s [%(levelname)s] %(message)s [at %(filename)s:%(lineno)d]\",\n \"critical\": \"%(asctime)s [%(levelname)s] %(message)s [at %(filename)s:%(lineno)d]\",\n }\n level_mapping = {\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warn\": logging.INFO,\n 
\"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n }\n\n date_format = \"%Y-%m-%d %H:%M:%S\"\n formatter = logging.Formatter(fmt=msg_formats[level.lower()], datefmt=date_format)\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n if len(logger.handlers) > 0:\n rm_idx = [idx for idx, handler in enumerate(logger.handlers) if isinstance(handler, logging.StreamHandler)]\n for idx in rm_idx:\n del logger.handlers[idx]\n logger.addHandler(handler)\n logger.setLevel(level_mapping[level.lower()])\n return logger", "def get_logger(name='some script'):\n\n #timestamp for filename \n timestamp = datetime.now().strftime('%Y-%m-%d')\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n #custom formatter\n formatter = logging.Formatter(\n '%(asctime)s %(name)s %(levelname)s %(filename)s '\n '%(funcName)s line: %(lineno)s: %(msg)s'\n )\n handler = logging.FileHandler('/tmp/scripts_{0}.log'.format(timestamp))\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n #print to stdout if it's interactive, but file-only if not\n if sys.stdin.isatty():\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n \n return logger", "def get_logger(log_level, log_handlers):\n logger = logging.getLogger()\n logger.setLevel(getattr(logging, log_level.upper()))\n if not logger.hasHandlers():\n if log_handlers is not None:\n for handler in log_handlers:\n logger.addHandler(handler)\n else:\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\"[%(asctime)s %(processName)s] %(levelname)s - %(message)s\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def get_logger(parent_module=''):\n return GLOBAL_LOGGER_BUILDER.build_logger(parent_module)", "def get_logger():\n return PLLogger.GetLogger(\"testintel\")", "def getLogger():\n return logging.getLogger(__name__)", "def get_instance():\n if Logger._logger_instance is None:\n Logger()\n return Logger._logger_instance", "def get_logger(name):\n # type: (str) -> Logger\n return logging.getLogger(name)", "def get_logger(args):\n logger_kind = 'tensorboard' if 'logger' not in args.__dict__ else args.logger\n if logger_kind == 'tensorboard':\n logger = pl.loggers.tensorboard.TensorBoardLogger(\n save_dir=os.path.join(os.getcwd(), 'tmp'),\n name=args.dataset,\n )\n\n elif logger_kind == 'wandb':\n logger = pl.loggers.WandbLogger(\n save_dir=os.path.join(os.getcwd(), 'tmp'),\n name=args.backbone,\n )\n\n else:\n raise Exception(f'Error. 
Logger \"{lokker_kind}\" is not supported.')\n return logger", "def logger(self) -> logging.Logger:\n cls = type(self)\n return logging.getLogger(cls.__module__ + \".\" + cls.__name__)", "def get_logger():\n global swan_logger\n return swan_logger", "def get_console_logger(name=__file__, level=logging.INFO):\n sh_formatter = logging.Formatter(\n \"%(asctime)s - %(levelname)s - %(name)s- %(funcName)s -%(lineno)d-\"\n \" %(message)s\"\n )\n logger = logging.getLogger(name)\n logger.setLevel(level)\n sh = logging.StreamHandler()\n sh.setFormatter(sh_formatter)\n logger.addHandler(sh)\n return logger", "def get_logger(name):\n return StyleAdapter(logging.getLogger(name))", "def _get_logger(self):", "def logger(self):\n if not self._state[\"logger\"]:\n self.prepare()\n return self._state[\"logger\"]", "def get_sequoia_logger() -> logging.Logger:\n logging.basicConfig(\n level=config[\"LOGGING\"][\"LEVEL\"],\n format=\"[%(asctime)s] %(levelname)s %(module)s.%(funcName)s.%(lineno)d %(message)s\",\n )\n return logging.getLogger()", "def _get_logger(filename='test_install.log'):\n logger = logging.getLogger('test_install.py')\n logger.setLevel(logging.DEBUG)\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n\n file_handler = logging.FileHandler(filename)\n file_handler.setLevel(logging.DEBUG)\n\n logger.addHandler(console_handler)\n logger.addHandler(file_handler)\n\n return logger", "def get_logger(name: str):\n logger = logging.getLogger(name)\n\n for handler in HANDLERS:\n logger.addHandler(handler)\n\n return logger", "def get_logger(name):\n logger = logging.getLogger(name)\n level = get_module_log_level(name)\n logger.setLevel(level)\n handler = logging.FileHandler(get_log_file(name))\n handler.setFormatter(logging.Formatter(\n '%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]'\n ))\n handler.setLevel(level)\n logger.addHandler(handler)\n logger.info(\"returning a logger set to level: {} for module: {}\".format(level, name))\n return logger", "def _get_logger(self):\n return Logger(\"Weak Algorithms\")", "def create_logger():\n logger = logging.getLogger(\"punctuation_logger\")\n logger.setLevel(logging.INFO)\n #logger.setLevel(logging.NOTSET) # Set Logger's level to NOTSET, default is WARNING\n\n # create the logging file handler\n if options.log_file is not None:\n fh = logging.FileHandler(options.log_file)\n \n fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(fmt)\n fh.setFormatter(formatter)\n fh.setLevel(logging.NOTSET)\n # add handler to logger object\n logger.addHandler(fh)\n return logger", "def get_logger(name, conf):\n\n try:\n # try absolute path\n lfile = conf['log_file']\n except KeyError:\n print('config warning: log file is not configured, logging to default.log')\n lfile = 'default.log'\n except:\n print('config error: log file directory does not exist')\n lfile = 'default.log'\n\n try:\n timezone = conf['time_zone']\n except KeyError:\n timezone = 'America/Chicago'\n\n tz = pytz.timezone(timezone)\n\n class Formatter(logging.Formatter):\n def converter(self, timestamp):\n return datetime.datetime.fromtimestamp(timestamp, tz)\n\n def formatTime(self, record, datefmt=None):\n dt = self.converter(record.created)\n if datefmt:\n s = dt.strftime(datefmt)\n else:\n t = dt.strftime(self.default_time_format)\n s = self.default_msec_format % (t, record.msecs)\n return s\n\n logger = logging.getLogger(name)\n handler = logging.FileHandler(lfile)\n 
handler.setFormatter(Formatter(\"%(asctime)s: %(levelname)s: %(name)s: %(message)s\", \"%Y-%m-%dT%H:%M:%S%z\"))\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n return logger", "def get_ucxpy_logger():\n\n _level_enum = logging.getLevelName(os.getenv(\"UCXPY_LOG_LEVEL\", \"WARNING\"))\n logger = logging.getLogger(\"ucx\")\n\n # Avoid duplicate logging\n logger.propagate = False\n\n class LoggingFilter(logging.Filter):\n def filter(self, record):\n record.hostname = socket.gethostname()\n record.timestamp = str(\"%.6f\" % time.time())\n return True\n\n formatter = logging.Formatter(\n \"[%(timestamp)s] [%(hostname)s:%(process)d] UCXPY %(levelname)s %(message)s\"\n )\n\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n handler.addFilter(LoggingFilter())\n logger.addHandler(handler)\n\n logger.setLevel(_level_enum)\n\n return logger", "def getLogger(logger_name='root'):\n if not CONFIGURATION_SET:\n set_config()\n return structlog.get_logger(logger_name, name=logger_name)", "def loggerSetup(logLevel=logging.INFO):\n logger = logging.getLogger(__name__)\n outHandler = logging.StreamHandler(sys.stdout)\n outHandler.setFormatter(logging.Formatter(\"%(asctime)s:%(levelname)s:%(module)s: %(message)s\"))\n outHandler.setLevel(logLevel)\n logger.addHandler(outHandler)\n logger.setLevel(logLevel)\n return logger", "def logger():\n logger = logging.getLogger(\"Automation_Dispatcher\")\n logger.setLevel(settings.LOGLEVEL)\n handler = logging.StreamHandler()\n logger.addFilter(_Commmon_filter())\n handler.setFormatter(logging.Formatter('%(asctime)s [%(component)s]'\n ' [%(levelname)s] %(message)s', \"%Y-%m-%d %H:%M:%S\"))\n logger.addHandler(handler)\n return logger", "def create_logger():\n logging.basicConfig(level = logging.INFO, filename='logging', filemode='w')\n logger = logging.getLogger(\" \")\n admin_handler = logging.FileHandler('logging')\n admin_handler.setLevel(logging.INFO)\n logger.addHandler(admin_handler)\n logger.warning(f'{admin_handler} created a new logger')\n return logger", "def log():\n return logging.getLogger(__name__)", "def get_logger(logger_name, logging_format, file_name, level=logging.INFO):\n path, prepared = '', True\n for cat in file_name.split('/')[1:-1]:\n path += '/%s' % cat\n if not os.path.exists(path):\n try:\n os.mkdir(path)\n except PermissionError:\n prepared = False\n break\n if not prepared:\n file_name = '/tmp/%s' % file_name.split('/')[-1]\n logging.basicConfig(level=level, format=logging_format)\n log = logging.getLogger(logger_name)\n handler = logging.FileHandler(file_name, encoding='utf8')\n handler.setFormatter(logging.Formatter(logging_format))\n log.addHandler(handler)\n log.setLevel(level=level)\n return log", "def init_logger(level, printout=True):\n root_logger = logging.getLogger(\"optimus\")\n root_logger.setLevel(level)\n\n # Redirect outputs to the void space, mostly for usage within unittests\n if not printout:\n from io import StringIO\n\n dummystream = StringIO()\n handler = logging.StreamHandler(dummystream)\n # Standard output with colored messages\n else:\n handler = logging.StreamHandler()\n handler.setFormatter(\n colorlog.ColoredFormatter(\n \"%(asctime)s - %(log_color)s%(message)s\", datefmt=\"%H:%M:%S\"\n )\n )\n\n root_logger.addHandler(handler)\n\n return root_logger", "def get_logger(self):\n return self.logger" ]
[ "0.76173455", "0.73903525", "0.7385359", "0.73532313", "0.72620827", "0.7261054", "0.7208833", "0.72087556", "0.7140604", "0.7138828", "0.7128172", "0.71156687", "0.7111992", "0.7104806", "0.70627075", "0.70546305", "0.70511794", "0.7048386", "0.70350707", "0.7021459", "0.6994875", "0.6994218", "0.69697547", "0.6959803", "0.6941394", "0.69319063", "0.6930942", "0.69300723", "0.6914437", "0.68929183", "0.68786114", "0.686125", "0.6849836", "0.6825919", "0.68222684", "0.6809728", "0.68090236", "0.68049955", "0.6800724", "0.67453355", "0.67438775", "0.67402166", "0.6734794", "0.67330563", "0.672942", "0.67016244", "0.67011386", "0.66920114", "0.66897976", "0.668446", "0.6675524", "0.6662941", "0.6649594", "0.6647716", "0.6647061", "0.66446704", "0.6644304", "0.66321796", "0.6627281", "0.6620939", "0.6617367", "0.6615173", "0.66032046", "0.6593598", "0.6585532", "0.6581872", "0.6580114", "0.65800613", "0.65783024", "0.6574071", "0.65549934", "0.6548562", "0.6526965", "0.65245104", "0.65039456", "0.6496883", "0.649229", "0.64900714", "0.64686656", "0.64681816", "0.6467586", "0.6463794", "0.6451809", "0.6435428", "0.64351755", "0.643071", "0.64112425", "0.6407181", "0.6402484", "0.6396914", "0.639", "0.6388395", "0.6384405", "0.6383414", "0.6367038", "0.63535845", "0.63503635", "0.63279086", "0.6322206", "0.63174057" ]
0.76066315
1
Find the first time a running sum repeats
def find_repeating_frequency(values):
    frequencies = set([0])
    index = 0
    frequency = 0
    while True:
        found = False
        for value in values:
            frequency += value
            index += 1
            if frequency in frequencies:
                found = True
                break
            frequencies.add(frequency)
        if found:
            break
    return frequency
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FirstRepeatingFrequency(self):\n prev_freqs = {0}\n freq = 0\n for offset in cycle(self.freq_changes):\n freq += offset\n if freq in prev_freqs:\n return freq\n else:\n prev_freqs.add(freq)", "def take_first(nums):\n take=np.zeros(len(nums),dtype=bool)\n if len(nums)==1:\n if nums[0]!=0:\n take[0]=True\n return take\n i=0\n while i<len(nums)-1:\n if nums[i]==0:\n i=i+1\n continue\n if i==0 and nums[i]==nums[i+1]:\n take[i]=True\n if i>0 and nums[i-1]==0:\n take[i]=True\n if i==0 and nums[i] != nums[i+1]:\n take[i]=True\n take[i+1]=True\n if nums[i] != nums[i+1]:\n take[i+1]=True\n i=i+1 \n return take", "def fn(i):\n if i == 0: return 1 # boundary condition \n ans = 0\n for k in range(1, N+1): \n if k not in seen and (k%i == 0 or i%k == 0): \n seen.add(k)\n ans += fn(i-1)\n seen.remove(k)\n return ans", "def firstDuplicate(array):\n dic = {}\n for num in array:\n if num in dic:\n return num\n else:\n dic[num] = True\n return -1", "def solution(A):\n A = set(A) # cast to a set to remove all duplicates\n i = 1 # set incrementor == 1 (greater than 0)\n while i in A: # if the incrementor is in our array\n i += 1 # increment it and repeat. this continues until we find the lowest missing number\n return i", "def firstMissingPositive(self, nums):\n nums.sort()\n res = 1\n for num in nums:\n if num == res:\n res += 1\n return res", "def smallest_impossible_sum(arr:list):\n smallest_sum = 1\n for i in arr:\n if i > smallest_sum:\n return smallest_sum\n else:\n smallest_sum += i\n\n return smallest_sum", "def repeatedNTimes(A: List[int]) -> int:\n \n target = len(A) / 2\n repeats = {}\n \n for num in A:\n if num not in repeats:\n repeats[num] = 1\n else:\n repeats[num] += 1\n \n for key in repeats.keys():\n if repeats[key] == target:\n return key\n \n return -1", "def firstMissingPositive(self, nums):\n n = len(nums)\n for i in range(n):\n elem, idx = nums[i], nums[i] - 1\n while idx >= 0 and idx < n and nums[idx] != elem:\n nextElem = nums[idx]\n nums[idx] = elem\n elem, idx = nextElem, nextElem - 1\n # print('here')\n\n for i in range(n):\n if nums[i] != i + 1:\n return i + 1\n return n + 1", "def _find_repeats(data: Tensor) ->Tensor:\n temp = data.detach().clone()\n temp = temp.sort()[0]\n change = torch.cat([torch.tensor([True], device=temp.device), temp[1:] != temp[:-1]])\n unique = temp[change]\n change_idx = torch.cat([torch.nonzero(change), torch.tensor([[temp.numel()]], device=temp.device)]).flatten()\n freq = change_idx[1:] - change_idx[:-1]\n atleast2 = freq > 1\n return unique[atleast2]", "def repeats(arr):\n return sum([el for el in arr if arr.count(el) == 1])", "def fn(x):\n ans = rsm = ii = 0 \n for i in range(len(nums)): \n rsm += nums[i]\n while rsm > x: # sliding window \n rsm -= nums[ii]\n ii += 1\n ans += i - ii + 1\n return ans", "def find_repeat(ints):\r\n\r\n n = len(ints)\r\n # n ints, so the list is\r\n # 1..(n-1) + some duplicated number\r\n # = ((n-1) * (1 + n - 1) / 2)\r\n # = ((n-1) * (n) / 2)\r\n # = ((n-1) * (n / 2.0)\r\n gauss = int((n - 1) * (n / 2.0))\r\n total = sum(ints)\r\n dupe = total - gauss\r\n return dupe", "def repeatedNTimes(self, A: List[int]) -> int:\n \"\"\" \n A = sorted(A)\n if A[0] == A[1]:\n return A[0]\n return A[int(len(A)/2)]\n \"\"\"\n res = set()\n for a in A:\n if a in res:\n return a\n res.add(a)", "def method2():\n n = 1000\n s = 0\n multiples = [3,5]\n total = []\n\n for m in multiples:\n total.append(0)\n\n minValue = 0\n while(minValue < 1000):\n minValue = 1000\n minPosition = 0\n for i, v in enumerate(total):\n if v < minValue:\n 
minValue = v\n minPosition = i\n\n temp = total[minPosition] + multiples[minPosition]\n\n if(temp < 1000) and (temp not in total):\n s += temp\n\n total[minPosition] = temp\n\n return s", "def firstMissingPositive(self, nums: List[int]) -> int:\r\n n = len(nums)\r\n for i in range(n):\r\n if nums[i] < 1 or nums[i] > n:\r\n nums[i] = 0\r\n \r\n for i in range(n):\r\n if 1 <= nums[i] % (n + 1) <= n:\r\n ind = nums[i] % (n + 1) - 1\r\n nums[ind] += n + 1\r\n \r\n for i in range(n):\r\n if nums[i] <= n:\r\n return i + 1\r\n \r\n return n + 1", "def solution(A):\n \n cars = 0\n ones = 0\n\n for i in range(len(A), 0, -1):\n\n if A[i-1] == 1:\n ones += 1\n else:\n cars += ones\n\n return (-1 if cars > 1000000000 else cars)", "def find_num_appearing_once(ls):\n \n sum_bin = 0\n ans = [] # can also define a list with a length of 32 for 32 bit machine\n result = 0 # decimal\n for e in ls:\n # print(int(format(e, 'b')))\n sum_bin += int(format(e, 'b'))\n print(sum_bin)\n \n # record which bit cannot mod 3\n while sum_bin != 0:\n bit = sum_bin % 10\n if bit % 3 != 0:\n ans.append(1)\n else:\n ans.append(0)\n sum_bin //= 10\n print(ans)\n \n # convert ans into decimal\n pow = len(ans) - 1\n while len(ans) != 0:\n result += ans.pop() * 2**pow\n pow -= 1\n print(result)", "def find_missing_pos_nlogn(input):\n input = sorted(input)\n min = 1\n for ii in input:\n if ii > 0:\n if ii == min:\n min += 1\n else:\n return min\n\n return min", "def find_repeat_cycle(programs, moves):\n init = programs[:]\n repeat_cycle = 0\n while True:\n programs = apply_moves(programs, moves)\n repeat_cycle += 1\n if programs == init:\n return repeat_cycle", "def one():\r\n \r\n i = 1\r\n sum = 0\r\n while i < 1000:\r\n if i % 3 == 0 or i % 5 == 0:\r\n sum = sum + i\r\n i = i + 1\r\n else:\r\n i = i + 1\r\n return sum", "def fn(n, x, r):\n if n == 0: return 1\n ans = 0\n for xx in range(6): \n if xx != x: ans += fn(n-1, xx, 1)\n elif xx == x and r < rollMax[x]: ans += fn(n-1, x, r+1)\n return ans", "def find_it(val):\n val = sorted(val, key=int)\n temp_val = val[0]\n counter = 0\n for i, number in enumerate(val):\n if not number in val:\n # number repeated odd times\n return temp_val\n else:\n if number == temp_val:\n counter = counter + 1\n else:\n if counter % 2 != 0:\n return temp_val\n counter = 1\n temp_val = number\n return temp_val", "def first_missing_num(the_list):\n the_list.sort()\n first_index = 0\n next_min = 0\n for i, v in enumerate(the_list):\n if v > 0:\n first_index = i\n next_min = v\n break\n for num in the_list[first_index:]:\n if num < next_min:\n continue\n elif num == next_min:\n next_min += 1\n else:\n return next_min\n return next_min", "def runs(L):\n runs = []\n current_run = []\n if len(L)>0:\n current_run = [L[0]]\n for i in range(len(L)-1):\n if L[i]+1==L[i+1]:\n current_run.append(L[i+1])\n else:\n if len(current_run)==1:\n runs.append(current_run[0])\n else:\n runs.append(current_run)\n current_run = [L[i+1]]\n if len(current_run)>0:\n if len(current_run)==1:\n runs.append(current_run[0])\n else:\n runs.append(current_run)\n return runs", "def track_runs(iterable):\r\n track_repeats=[]\r\n current_element = None\r\n current_repeats = 0\r\n element_i = 0\r\n for element in iterable:\r\n if current_element == element:\r\n current_repeats += 1\r\n else:\r\n track_repeats.append((current_repeats,current_element, element_i-current_repeats))\r\n current_element = element\r\n current_repeats = 1\r\n element_i += 1\r\n track_repeats = track_repeats[1:]\r\n return track_repeats", "def 
long_repeat(line):\n num = 0\n l = []\n if len(line):\n for i in range(len(line)-1):\n if line[i+1] == line[i]:\n num += 1\n l.append((line[i],num+1))\n else:num = 0\n if l:\n return(sorted(l,key = lambda x:x[1],reverse = True)[0][1])\n else: return 1\n return 0", "def keep_first_iteration(self):\n self.keep_first_iteration_flag = True", "def first_frequency_twice(changes: Sequence[int]) -> int:\n frequency = 0\n counts = {frequency: 1}\n found = False\n while not found:\n for change in changes:\n frequency += change\n count = counts.get(frequency, 0) + 1\n if count == 2:\n found = True\n break\n counts[frequency] = count\n return frequency", "def find_the_duplicate(nums):\n # frequency = {}\n\n # for num in nums:\n # frequency[num] = frequency.get(num, 0) + 1\n\n # for num in frequency:\n # if frequency[num] == 2:\n # return num\n\n ##########\n\n # nums_dict = list(enumerate(sorted(nums)))\n\n # for i, num in nums_dict:\n # if num == nums_dict[i + 1]:\n # return num\n\n ##################\n\n seen = set()\n\n for num in nums:\n if num in seen:\n return num\n seen.add(num)", "def calc_rec_cycle(number):\n result = 0\n i = 10 ** (int(math.log10(number)) + 1)\n s = set()\n\n while True:\n if i == number or i == 0:\n result = 0\n break\n\n if i < number:\n result += 1\n i *= 10\n continue\n\n # i > n\n r = i % number\n #print('r',r)\n if r not in s:\n result += 1\n s.add(r)\n else:\n break\n\n i = r * 10\n return result", "def fn(i, seen=set()):\n ans = 0\n if i < len(s): # boundary condition when i == len(s)\n for ii in range(i+1, len(s)+1): \n if s[i:ii] not in seen: \n seen.add(s[i:ii])\n ans = max(ans, 1 + fn(ii, seen))\n seen.remove(s[i:ii])\n return ans", "def first_missing_positive_int_linear(nums):\n\t\n\t# Here's the trick: the first missing positive number must be \n\t# between 1 and len(array) + 1 \t\n\ts = set(nums)\n\t\n\tfor i in range(1, len(nums) + 1):\n\t\tif i not in s:\n\t\t\treturn i", "def running_sum(nums_li: List[int]) -> List[int]:\n for i in range(1, len(nums_li)):\n nums_li[i] += nums_li[i - 1]\n return nums_li", "def solution(a: list) -> int:\n c = Counter(a)\n return next(k for k, v in c.items() if v % 2)", "def brute_force(seats):\n for seat in seats:\n if seat + 1 not in seats and seat + 2 in seats:\n return seat + 1\n\n return None", "def get_min_run(n):\n r = 0\n while n >= 64:\n r |= n & 1\n n >>= 1\n return n + r", "def fn(x):\n if not x: return 0 \n ans = inf\n freq = Counter(x)\n for cnt in freqs: \n if x[0] in cnt: \n xx = \"\".join(k*v for k, v in (freq - cnt).items())\n ans = min(ans, 1 + fn(xx))\n return ans", "def fn(freq, r):\n ans = 0 \n if any(freq): \n for i, x in enumerate(freq): \n if x: ans = max(ans, fn(freq[:i] + (x-1,) + freq[i+1:], (r+i)%batchSize))\n if r == 0: ans += 1\n return ans", "def solution():\n i = 1\n\n while True:\n if (\n sorted(str(i))\n == sorted(str(2 * i))\n == sorted(str(3 * i))\n == sorted(str(4 * i))\n == sorted(str(5 * i))\n == sorted(str(6 * i))\n ):\n return i\n\n i += 1", "def find_first_postitive(arr):\n n = len(arr)\n containsOne = False\n for i in range(n):\n if arr[i] == 1:\n containsOne = True\n elif arr[i] <= 0 or arr[i] > n:\n arr[i] = 1\n if not containsOne:\n return 1\n for i in range(n):\n index = abs(arr[i]-1)\n if arr[index] > 0:\n arr[index] = -arr[index]\n for i in range(n):\n if arr[i] > 0:\n return i+1\n return n+1", "def singles(counts):\n return (counts==1).sum()", "def get_n1(r,N):\n n1 = N - np.sum(r)\n return n1", "def firstMissingPositive(nums):\n\n n = len(nums)\n\n # Base case.\n if 1 not in 
nums:\n return 1\n\n # nums = [1]\n if n == 1:\n return 2\n\n # Replace negative numbers, zeros,\n # and numbers larger than n by 1s.\n # After this conversion nums will contain \n # only positive numbers.\n for i in range(n):\n if nums[i] <= 0 or nums[i] > n:\n nums[i] = 1\n\n # Use index as a hash key and number sign as a presence detector.\n # For example, if nums[1] is negative that means that number `1`\n # is present in the array. \n # If nums[2] is positive - number 2 is missing.\n for i in range(n): \n a = abs(nums[i])\n # If you meet number a in the array - change the sign of a-th element.\n # Be careful with duplicates : do it only once.\n # [3,4,-1,1]\n if a == n:\n nums[0] = - abs(nums[0])\n else:\n nums[a] = - abs(nums[a])\n \n # Now the index of the first positive number \n # is equal to first missing positive.\n for i in range(1, n):\n if nums[i] > 0:\n return i\n\n if nums[0] > 0:\n return n\n\n return n + 1", "def brute_force_hashed(seats):\n seats = set(seats)\n for seat in seats:\n if seat + 1 not in seats and seat + 2 in seats:\n return seat + 1\n\n return None", "def next(self):\r\n rnd = rand() * self.totals[(-1)]\r\n return bisect.bisect_right(self.totals, rnd)", "def search(f):\n x = 0\n while not f(x):\n x += 1\n return x", "def arr_4(A):\n copy = np.copy(A)\n leftovers = 20 % A\n total_leftovers = np.cumsum(leftovers) #total leftovers after each day\n enough_left = total_leftovers >= A #boolean array to determine whether we have enough to buy another stock for the day\n\n if True in enough_left:\n day = list(enough_left).index(True) #gets first index where we can buy\n return day\n else:\n return -1", "def remove_duplicates(arr):\n if not arr:\n return 0\n last_unique = None\n ans = 0\n for curr in range(len(arr)):\n if arr[curr] != last_unique:\n ans += 1\n last_unique = arr[curr]\n return ans", "def sum_n(k, lst):\n seen = set()\n for num in lst:\n if k - num in seen:\n return True\n seen.add(num)\n return False", "def r1(P):\n assert P.isreal()\n ans = 0\n s = P.sturm()\n while s:\n ans += s\n P = P.gcd(P.prime())\n s = P.sturm()\n return ans", "def sum_of_reoccurring_data_points(x):\n unique, counts = np.unique(x, return_counts=True)\n counts[counts < 2] = 0\n return np.sum(counts * unique)", "def countZeroes(arr):\n counter = 0\n #sort the array\n arr.sort(reverse=True)\n print(arr)\n n = len(arr)\n print(n)\n\n # Find index of first zero in given array\n first = firstZero(arr, 0, n - 1)\n \n # If 0 is not present at all, return 0\n if (first == -1):\n return 0\n\n for i in range(first,len(arr)):\n if (arr[i] == 0):\n counter += 1\n else:\n break\n\n return counter", "def sum_finder(nums, sum_wanted):\r\n\r\n for i, ni in enumerate(nums):\r\n\r\n for x, nx in enumerate(nums[i+1:]):\r\n\r\n if ni + nx == sum_wanted:\r\n print(\"Yes\", ni, \"and\", nx, \"=\", sum_wanted)\r\n else:\r\n print(ni, \"and\", nx, \"=\", \"No match\")", "def find_non_trivial_orbit(generators: [Permutation]) -> int:\n if not generators:\n return None\n n = generators[0].n\n for P in generators:\n for element in range(n):\n if P[element] != element:\n return element", "def elementary_summand(fixed, i):\n if i < fixed:\n return 0\n elif i == fixed:\n return 2\n else:\n return 1", "def findFirstTrueValue( data ):\n # for long sound with a lot of silence and noise, it's faster to recode it having a return well placed. 
(8sec => 0.052sec)\n n = len(data);\n i = 0; \n while( i < n ):\n if( data[i] ):\n return i\n i += 1\n return -1", "def consecutive_prime_sum():\n print \"testing consecutive_prime_sum\"\n sum=0\n for x in range(100000):\n if x!=1:\n if recursive_prime(x)=='prime':\n #print x\n sum = sum + x\n if sum > 1000000:\n sum = sum - x\n return sum\n #if x is prime, then add x to sum\n #check if sum is above 1 million\n #if sum is above 1 million, then subtract x from it and return that number", "def play_one_game():\n sum = roll_dice()\n print(\"You rolled \", sum)\n if (sum == 7 or sum == 11):\n return 1\n elif (sum == 2 or sum == 3 or sum == 12):\n return 0\n else:\n point = sum\n print(\"Your point is \", point)\n print(\" \")\n newsum = 0\n while (newsum != point and newsum != 7):\n newsum = roll_dice()\n print(\"You rolled\", newsum)\n if (newsum == point):\n return 1\n else:\n return 0", "def problem077():\n\n cond = lambda n: num_prime_sum_ways(n) > 5000\n ans = next(filter(cond, itertools.count(2)))\n return ans", "def repeated(f, n, x):\n if n == 1:\n return f(x)\n else:\n return repeated(f,n-1,f(x))", "def first_value(self):\n return 0", "def fn(k):\n seen = set()\n for i in range(len(s)-k+1): \n val = (prefix[i+k] - prefix[i]*fac[k]) % MOD \n if val in seen: return True # rolling hash (ver. Monte Carlo)\n seen.add(val)\n return False", "def fn(n, k):\n if n == k: return 1\n if k == 0: return 0\n return ((n-1)*fn(n-1, k) + fn(n-1, k-1)) % 1_000_000_007", "def summer_of_69(arr):\n total = 0\n add = True\n for num in arr:\n while add:\n if num != 6:\n total += num\n break\n else:\n add = False\n break\n while not add:\n if num != 9:\n break\n else:\n add = True\n break\n return total", "def fn(i):\n if i == 2*n-1 or ans[i] and fn(i+1): return True \n for x in reversed(range(1, n+1)): \n if x not in ans: \n ii = x if x > 1 else 0 \n if i+ii < 2*n-1 and ans[i] == ans[i+ii] == 0: \n ans[i] = ans[i+ii] = x\n if fn(i+1): return True \n ans[i] = ans[i+ii] = 0", "def part_one(rucksacks: list) -> int:\n summ = 0\n for rucksack in rucksacks:\n split_point = len(rucksack) // 2\n first = set(rucksack[:split_point])\n second = set(rucksack[split_point:])\n misplaced_item = list(first.intersection(second))[0]\n summ += PRIORITY.get(misplaced_item, 0)\n return summ", "def helper(nums_dict, S):\n if not nums_dict:\n return 1 if S == 0 else 0\n \n num = max(nums_dict.keys())\n num_count = nums_dict.pop(num)\n remaining_sum = sum([k*v for k, v in nums_dict.items()])\n #print(nums_dict, remaining_sum, S)\n \n ans = 0\n num_sum = -num*num_count\n for i in range(num_count+1):\n if -remaining_sum <= S - num_sum <= remaining_sum:\n ans += helper(nums_dict.copy(), S-num_sum) * comb(num_count, i)\n num_sum += 2*num\n return ans", "def solution3(nums, K):\n modSeen = {0:-1}\n s = 0\n for i in range(len(nums)):\n n = nums[i]\n s += n\n mod = s % K if K != 0 else s\n if mod in modSeen:\n if i - modSeen[mod] > 1:\n return True\n else:\n modSeen[mod] = i\n return False", "def firstMissingPositiveInteger(arr):\n missing_value = 1\n try:\n for i in range(len(arr)):\n if arr[i] > 0:\n if arr[i] == missing_value:\n missing_value += 1\n \n except IndexError as err:\n print('IndexError --> {0}'.format(err))\n raise\n except ValueError as err:\n print('ValueError --> {0}'.format(err))\n raise\n\n finally:\n return missing_value", "def find_i(n):\n lst = []\n for i in range(1, n):\n lst.append(2 * compute(n - i) + 2 ** i - 1)\n result = min(lst)\n return lst.index(result) + 1", "def fn(i, x):\n if i == goal: return x == n \n 
ans = 0 \n if x < n: ans += (n-x) * fn(i+1, x+1) # a new song\n if k < x: ans += (x-k) * fn(i+1, x) # an old song\n return ans % 1_000_000_007", "def getFirstFactor (n):\r\n # Start with iterator = 2\r\n i = 2\r\n\r\n while i < sqrt(n):\r\n # If n % i is, then the first factor has been found\r\n if n % i == 0:\r\n return i\r\n\r\n i += 1\r\n\r\n # Return -1 if somehow a factor isn't found\r\n return -1", "def get_next_keystream_value(deck_of_cards):\n get_big_joker_value(deck_of_cards)\n get_small_joker_value(deck_of_cards)\n move_small_joker(deck_of_cards)\n move_big_joker(deck_of_cards)\n triple_cut(deck_of_cards)\n insert_top_to_bottom(deck_of_cards)\n keystream_value = get_card_at_top_index(deck_of_cards)\n \n if keystream_value == get_big_joker_value(deck_of_cards) or \\\n keystream_value == get_small_joker_value(deck_of_cards):\n keystream_value = get_next_keystream_value(deck_of_cards)\n return keystream_value\n\t\n # Condition where if keystream_value is equal to big_joker_value or\n # small_joker_value then this will be repeated. After occuring it is then \n # checked again to see if keystream_value is equal to big_joker_value or\n # small_joker_value. If so, then again repeated until not so.", "def next_run_idx(self):\n return self.num_runs", "def recurrent_sum_of_elements_in_list(lst):\n if len(lst) == 0:\n return 0\n elif len(lst) == 1:\n return lst[0]\n return lst[0] + recurrent_sum_of_elements_in_list(lst[1:])", "def sum1():\n\txs = []\n\tfor i in range(100000):\n\t\tnum = joe.randrange(1000)\n\t\txs.append(num)\n\n\ttot = sum(xs)\n\treturn tot", "def first_missing_positive(nums):\n start = 0\n end = len(nums) - 1\n while start <= end:\n i = nums[start] - 1\n # if this element is in position\n if i == start:\n start += 1\n # if the element is negative or out of bounds \n # or a duplicate that is already sorted swap the\n # current element into the oob and dec the end \n elif i < 0 or i > end or nums[start] == nums[i]:\n nums[start] = nums[end]\n end -= 1\n # swap the element to where it should be\n else:\n nums[start], nums[i] = nums[i], nums[start]\n \n return start + 1", "def every_n_iters(self, runner: Runner, n: int):\n if runner.iter < self.start_iter:\n return True\n return (runner.iter + 1 - self.start_iter) % n == 0 if n > 0 else False", "def consecutiveNumbersSum(self, N):\n\n count = 0\n # nmax = int(-1 + sqrt(1+8*N)/2)\n # print(nmax)\n n = 1\n n2 = n*(n-1)/2 + n\n while n2 <= N:\n if (N-n2) % n == 0:\n # print(n)\n count += 1\n n += 1\n n2 = n*(n-1)/2 + n\n\n # Note N-(n2-n) % n == N-n2 % n\n return count", "def method1(self, nums):\n ## remain is how many occurances remain \n remain = Counter(nums)\n ## key is a sequence ending with \"key\" and with a length not smaller than 3, value is how many of such sequence are there\n found = defaultdict(int)\n \n for n in nums:\n if remain[n] == 0:\n continue\n ## if there is a sequence with length larger than or equal to 3 ends with nums[i] - 1, append nums[i] to it\n if found[n - 1] > 0:\n found[n] += 1\n found[n - 1] -= 1\n remain[n - 1] -= 1\n remain[n] -= 1\n ## else, nums[i] should be the first number of a sequence, need to find the following 2 other numbers, if cannot find them, return False\n else:\n ## whether there are nums[i] + 1 and nums[i] + 2 remain\n if remain[n + 1] > 0 and remain[n + 2] > 0:\n found[n + 2] += 1\n remain[n] -= 1\n remain[n + 1] -= 1\n remain[n + 2] -= 1\n else:\n return False\n \n return True", "def fact_while1(n: int) -> int:\n ret = 1\n if n == 0:\n return 1\n while True:\n if n == 1:\n 
return ret\n n, ret = n - 1, ret * n", "def find_minrun(n: int) -> int:\n r = 0 # Becomes 1 if any bits are shifted off\n assert n >= 0\n while n >= 64:\n # The target of this while-loop:\n # If n is an exact power of 2, return 32;\n # otherwise, return int k in [32,64] such that n/k is close to, but strictly \n # less than, an exact power of 2 that is larger than 2^1=2.\n \n # | is `OR by bits`, & is `AND by bits`. ie r = r|(n&1).\n # The next two lines of code work as follows:\n # 1. If n is an exact power of 2, then for all loops, n&1=0, r=r|0=0|0=0, \n # and n is halved, until n=64 and is halved to 32, with r=0, so returns 32.\n # 2. Otherwise, then there must be at least one `1` among the second to the \n # last digits of n's binary form, eg.10010000. We scan from the rightmost digit # to the left, and whenever a 1 is met, r is 1. n will decrease to the n//2^k \n # that is closest to but less than 64. The target is met.\n #\n # In essence, this procedure is simply taking the first 6 bits of n, and add \n # 1 if any of the remaining bits is 1 (we call a bit that is 1 a \"set bit\").\n\n r |= n & 1\n n >>= 1 # move n's binary form all 1 digit to the right, ie n = n // 2\n # If n < 64, just return n, since it is too small to bother with fancy stuff\n return n + r", "def fn(x, step):\n if x == stones[-1]: return True \n ans = False \n for ss in (step-1, step, step+1): \n if 0 < ss and x + ss in loc: ans = ans or fn(x + ss, ss)\n return ans", "def runs(lst):\n for j, two in enumerate(lst):\n if j == 0:\n one, i = two, 0\n if one != two:\n yield j - i, one\n i = j\n one = two\n yield j - i + 1, two", "def remove_duplicates(nums: List[int]) -> int:\n\n if not nums:\n return 0\n\n slow = 0\n for fast in range(1, len(nums)):\n # compare element with a next one in order to find a duplicate in a non-decreasing array\n # if current element is unique,\n # slow runner grows one step and copys the current value\n if nums[slow] != nums[fast]:\n slow += 1\n nums[slow] = nums[fast]\n return slow + 1", "def problem1():\n return sum(i for i in range(1000) if i % 3 == 0 or i % 5 == 0)", "def smallest_does_not_occur(list_):\n length = len(list_)\n if list_[0] != 1:\n return 1\n does_not_occur = [\n list_[each] + 1\n for each in range(length - 1)\n if list_[each + 1] - list_[each] > 1\n ]\n return (\n min(does_not_occur)\n if len(does_not_occur) > 0\n else list_[length - 1] + 1\n )", "def find_missing(numbers):\n left = 0\n right = len(numbers) - 1\n\n while left < right:\n middle = (left + right) // 2\n if numbers[middle] != middle + 1:\n if right == middle:\n return middle + 1\n right = middle\n else:\n if left == middle:\n if numbers[right] == right + 1:\n return right + 2\n return right + 1\n left = middle\n return 1", "def fn(x):\n if x <= 0: return int(x == 0)\n return sum(fn(x - xx) for xx in nums)", "def sum_in_set_linear(array, x):\n\n s = set() \n for i in range(len(array)):\n diff = x - array[i]\n if diff in s:\n return True\n s.add(array[i])\n return False", "def find_missing(nums):\n # calculate sum of all elements\n # in input list\n sum_of_elements = sum(nums)\n\n # There is exactly 1 number missing\n n = len(nums) + 1\n actual_sum = (n * (n + 1)) / 2\n return actual_sum - sum_of_elements", "def FIRST(L):\n global fi,eps\n R=set()\n eps_appear=False\n for x in L:\n eps_appear=False\n if not x.isTerminal():\n for o in fi[x]:\n if o==eps:\n eps_appear=True\n else:\n R.add(o)\n if eps not in fi[x]:\n break\n elif x!=eps:\n R.add(x)\n break\n else: # x==eps\n eps_appear=True\n if eps_appear:\n 
R.add(eps)\n if len(R)==0:\n R.add(eps)\n return R", "def first_missing_positive_int(nums):\n\n\t# Here's the trick again: the first missing positive number must be \n\t# between 1 and len(array) + 1 \n\n\t# use the array to keep track of what numbers exist in the list\n\t# we can ignore any negative numbers and numbers bigger \n\t# than len(array). \n\t# The basic idea is to use the indices of the array itself to \n\t# reorder the elements to where they should be\n\t\n\tfor i, num in enumerate(nums):\n\t\t# keep swapping until we get a negative or too large number \n\t\twhile i+1 != nums[i] and 0 < nums[i] <= len(nums)+1:\n\t\t\t# perform swap\n\t\t\tx = nums[i]\n\t\t\tnums[i], nums[x-1] = nums[x-1], nums[i]\n\n\t\t\t# if there are dupes we need to break or else we swap forever, so as long as we have ONE record of there being this number, it doesn't matter about the rest:\n\t\t\tif nums[i] == nums[x-1]:\n\t\t\t\tbreak\n\n\n\t# now just iterate through our 'effectively' sorted array (not really sorted, just the portion that matters)\n\tfor i, num in enumerate(nums):\n\t\tif num != i+1:\n\t\t\treturn i+1", "def fn(x):\n while mp.get(x, 0) < k: \n mp[x] = 1 + mp.get(x, 0)\n fn(x[1:] + str(mp[x]-1))\n if not ans: ans.append(x)\n else: ans.append(x[0])", "def find_the_sum(number):\n the_sum = 0\n\n for i in range(number):\n # The number is a multiple of 3 or 5\n # If the number is a multiple of both 3 and 5, it is counted once\n if (i % 3 == 0) or (i % 5 == 0):\n the_sum += i\n\n return the_sum", "def identicalRuns(s):\n beg = 0\n for k, v in groupby(s):\n end = beg + len(list(v))\n yield k, beg, end\n beg = end", "def big_straight(dice):\n if sorted(dice) == [2, 3, 4, 5, 6]:\n return sum(dice)\n return 0", "def least_disruptive_subarray(a, s):\n assert len(s) <= len(a)\n\n s_sum = sum(s)\n a_sum = sum(a[i] for i in xrange(len(s)))\n disruption = abs(a_sum - s_sum)\n index = 0\n\n for i in xrange(len(s), len(a)):\n a_sum += (a[i] - a[i - len(s)])\n\n if abs(a_sum - s_sum) < disruption:\n index = i - len(s) + 1\n disruption = abs(a_sum - s_sum)\n\n return index", "def findSmallestInteger(self, nums: List[int], value: int) -> int:\n\n count = Counter(n % value for n in nums)\n stop = 0\n for i in range(value):\n if count[i] < count[stop]:\n stop = i\n\n return value * count[stop] + stop" ]
[ "0.6377952", "0.6170564", "0.61421", "0.5981056", "0.5942517", "0.5867513", "0.5838692", "0.58114886", "0.58026123", "0.5784409", "0.5751145", "0.5738505", "0.56759614", "0.56725645", "0.56579083", "0.5624306", "0.55687976", "0.55209714", "0.5511702", "0.5495576", "0.54878193", "0.54838127", "0.5475442", "0.54661965", "0.54554", "0.54503936", "0.54422593", "0.5420203", "0.5407048", "0.5395948", "0.53912485", "0.5379424", "0.5378934", "0.5378618", "0.5367882", "0.533373", "0.53313285", "0.53207225", "0.53036815", "0.5280471", "0.5278142", "0.5276599", "0.52762854", "0.5274859", "0.52746135", "0.526591", "0.5261934", "0.5231913", "0.5231271", "0.52173257", "0.52159876", "0.5204488", "0.51892054", "0.51785344", "0.5176695", "0.51747024", "0.5168468", "0.51683545", "0.516558", "0.5157671", "0.51399946", "0.5137151", "0.51344573", "0.5133299", "0.5122904", "0.5119716", "0.5119109", "0.51082003", "0.51000893", "0.50923574", "0.50769395", "0.50724715", "0.50708765", "0.50576353", "0.50543845", "0.5053316", "0.5050537", "0.5045966", "0.5045158", "0.5029385", "0.5020389", "0.501974", "0.50122684", "0.5011358", "0.50098217", "0.5007932", "0.50069827", "0.5001084", "0.49997184", "0.4999597", "0.49993628", "0.49928668", "0.4992482", "0.4991715", "0.4991002", "0.49876332", "0.4986433", "0.4985844", "0.49755046", "0.4965527" ]
0.57920206
9
import count or FPKM table
def import_countOrFPKMTable( self,filename_I):
    #import and format the data
    io = base_importData();
    io.read_tab(filename_I);
    countOrFPKMTable = self.format_countOrFPKMTable(io.data);
    return countOrFPKMTable;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self):\n ans = self.execute(self.commands.table_count(self.name))\n return ans[0][0]", "def count():", "def samples(app, args):\n engine = create_engine(args.datafile)\n meta = MetaData()\n meta.reflect(engine)\n print(\"\\t\".join([str(x).replace('counts.', '')\n for x in meta.tables['counts'].columns\n if not x == 'counts.index']))", "def produce_mirna_single_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.single.bam.mirbase_counts.txt\")", "def get_table_count(table_name, query, headers, base_url, maxpagesize):\n logging.info(\"Running get_table_count() . . . \")\n\n #task_instance = context['task_instance']\n #headers = task_instance.xcom_pull('build_auth_headers', key='auth_headers')\n\n r_count = requests.get('{0}/ws/schema/table/{1}/count?{2}'.format(base_url, table_name, query), headers=headers)\n r_status = r_count.status_code\n if r_status != 200:\n logging.info('Response NOT successful. I got code {} '.format(r_status))\n raise ValueError('Response NOT successful. I got code {} '.format(r_status))\n else:\n logging.info('Response successful! I got code {} '.format(r_status))\n\n count_json = r_count.json()\n row_count = count_json['count']\n\n pages = int(math.ceil(row_count / maxpagesize))\n\n return row_count, pages", "def count_entries(self, tablename):\n query = \"Select count(*) from \" + tablename\n try:\n self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n fetcheddata = self.__cur.fetchone()\n return fetcheddata[0]", "def db_print_table_rows_cnt(db_path, table_name):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n print(\" Table Name : '%s'\" % table_name)\n # Prepare and execute SQL statement\n sql = ('SELECT COUNT(*) FROM {}').format(table_name)\n cursor.execute(sql)\n count = cursor.fetchall()\n print(\" Total Rows : %s\" % count[0][0])\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))", "def db_print_table_rows_cnt(db_path, table_name):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n print(\" Table Name : '%s'\" % table_name)\n # Prepare and execute SQL statement\n sql = ('SELECT COUNT(*) FROM {}').format(table_name)\n cursor.execute(sql)\n count = cursor.fetchall()\n print(\" Total Rows : %s\" % count[0][0])\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))", "def test_table_counts():\n number_of_test_run = 2 # Run the pipeline twice\n for i in range(number_of_test_run):\n dp = DataPipeline()\n dp.run()\n\n dp = DataPipeline()\n assert dp.get_product_count() == (500000,)\n assert dp.get_duplicate_count(from_table=\"products\") == (0,)\n assert dp.get_aggregate_table_result_count() == (222024, )\n 222024\n dp.close()", "def countTable(self, in_table_name):\n self.cursor.execute('SELECT COUNT(*) FROM {};'.format(in_table_name))\n return self.cursor.fetchone()[0]", "def count(self, query):", "def exp_calculator_with_count(count_table_file):\n count_table = pd.read_table(count_table_file, index_col=0)\n columns = count_table.columns\n\n gene_len = count_table[columns[0]]\n rpkm_dict = dict()\n tpm_dict = dict()\n for sample in columns[1:]:\n # Divide the read counts by the length of each gene in kilobases.\n # This gives you reads per kilobase (RPK)\n 
rpk = count_table[sample]/gene_len\n # get rpkm/fpkm\n total_counts = sum(count_table[sample])/1000\n \"\"\"\n rpkm = (count_table[sample]/gene_len)/(sum(count_table[sample])/1000)*1000000\n \"\"\"\n rpkm = rpk/total_counts*1000000\n # get tpm\n norm_gene_len_total_counts = sum(rpk)\n tpm = rpk/norm_gene_len_total_counts*1000000\n \"\"\"\n tpm = (count_table[sample]/gene_len)/sum(count_table[sample]/gene_len)*1000000\n \"\"\"\n # save\n rpkm_dict[sample] = rpkm\n tpm_dict[sample] = tpm\n # save results\n df_rpkm = pd.DataFrame(rpkm_dict, index=count_table.index)\n df_tpm = pd.DataFrame(tpm_dict, index=count_table.index)\n df_rpkm.to_csv(count_table_file+'.fpkm.xls', sep='\\t')\n df_tpm.to_csv(count_table_file+'.tpm.xls', sep='\\t')\n #\n return rpkm_dict, tpm_dict", "def produce_mirna_allbest_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.bam.mirbase_counts.txt\")", "def print_tables(db):\n # connect to the database and create a cursor\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByCountry'\n\n # print the data from StatelessCountByCountry\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByRegion'\n\n # print the data from StatelessCountByRegion", "def getCountFiles():\n result = 0\n session = Queries.createSession()\n try:\n result = session.execute(func.count(FileTable.id)).fetchone()[0]\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return result", "def get_source_records_count(self, tap_type, table):\n run_query_method = getattr(self, f'run_query_{tap_type.lower()}')\n result = run_query_method(f'SELECT count(1) FROM {table}')\n return result[0][0]", "def get_counts(filename, key):\r\n column_keys, get_data = get_csv(filename)\r\n assert(key in column_keys[1:])\r\n column = column_keys[1:].index(key)\r\n print 'getcounts() %s : %s column = %d' % (filename, key, column+1) \r\n counts_dict = {}\r\n for i,(k,v) in enumerate(get_data()):\r\n x = v[column]\r\n counts_dict[x] = counts_dict.get(x, 0) + 1\r\n return counts_dict", "def test_b_count_id(self):\n storage = FileStorage()\n count = storage.count(Amenity)\n self.assertEqual(1, count)\n count = storage.count(State)\n self.assertEqual(1, count)\n count = storage.count(City)\n self.assertEqual(1, count)\n count = storage.count(User)\n self.assertEqual(1, count)\n count = storage.count(Place)\n self.assertEqual(1, count)\n count = storage.count(Review)\n self.assertEqual(1, count)", "def init():\n try:\n database.CONN\n except Exception:\n database.init()\n print('Database connection established.')\n inputtools.init()\n outputtools.init()\n\n global _CONN\n global _NAME\n global _TEMP_NAME\n global _SIMPLECOUNT_COLUMNS\n global _UCR_INDICATOR_DICT\n \n _CONN = database.CONN\n _NAME = 'SimpleCount'\n _TEMP_NAME = f'Temp{_NAME}' \n _SIMPLECOUNT_COLUMNS = ['fk_simplecount_indicator', 'fk_simplecount_county', 'year', 'value']\n _UCR_INDICATOR_DICT = {\n 'domestic':1100,\n 'school':1120,\n 'hate':1130,\n 'acca': 1400,\n 'acsa':1401,\n 'ahsna':1402,\n 'adpa':1403,\n 'ameth':1404,\n 'ch':1410,\n 'rape':1411,\n 'rob':1412,\n 'aggba':1413,\n 'ach':1414,\n 'arape':1415,\n 'arob':1416,\n 'aaggba':1417,\n 'theft':1420,\n 'burg':1421,\n 'mvt':1422,\n 'arson':1423,\n 'atheft':1424,\n 'aburg':1425,\n 'amvt':1426,\n 'aarson':1427,\n 'htsex':1430,\n 
'htserve':1431,\n 'ahtsex':1440,\n 'ahtserve':1441,\n }", "def get_table_record_count(schema_name, table_name):\n sql = \"select count(*) AS 'COUNT' FROM {0}.{1} with(nolock);\"\n return fetch_row(sql.format(schema_name, table_name))", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def fast_count(db, Model): # noqa\n return db.session.execute(\n 'SELECT n_live_tup FROM pg_stat_all_tables WHERE relname = :tablename',\n {'tablename': Model.__tablename__}\n ).scalar()", "def load_fact_traffic_violations_count_agg(cur,code):\n cur.execute(code)", "def load_status_table():", "def table_stats(self, db, dest, kvargs, lines):\n if 'table' in kvargs:\n tables = [db.get_table(kvargs['table'])]\n else:\n tables = db.tables()\n options = kvargs.get('options','')\n done = False\n for table in db.tables():\n print(\"======================= {} =======================\".format(table.name))\n if 'dump' in options:\n print(\"schema dump:\")\n table.dump()\n print(\"\")\n if 'head' in options:\n print(\"First 5 records:\")\n for source_record in db.read_records_as_dicts(tablename=table.name, limit=5):\n print(source_record)\n print(\"\")\n # Compute single-variable stats on each of the variables\n sw = stopwatch().start()\n print(\"Computing statistics...\")\n stats = {}\n census_checksum = 0\n \n if self.spark_context:\n print(\"Using spark to read {} ... assuming first line has headings\".format(table.filename))\n sc = self.spark_context\n data = sc.textFile(table.filename)\n header = data.first() # extract the header\n stats = data.filter(lambda row:row!=header).map(table.parse_line_to_dict).reduce(stats_reducer)\n else:\n try:\n for source_record in db.read_records_as_dicts(tablename=table.name,limit=self.limit):\n if source_record['RECTYPE']=='P':\n census_checksum += census_person_polynominal(source_record)\n stats = stats_reducer(source_record, stats)\n except KeyboardInterrupt as e:\n print(\"*** KeyboardInterrupt at count: {}\".format(stats[':count']))\n done = True\n if stats:\n print(\"total records: {} speed: {:8.0f} records/sec\".format( stats[':count'], stats[':count']/sw.elapsed()))\n tt = tytable.ttable()\n tt.add_head(['variable','min','avg','max'])\n tt.set_col_alignment(1,tytable.ttable.RIGHT)\n tt.set_col_alignment(2,tytable.ttable.RIGHT)\n tt.set_col_alignment(3,tytable.ttable.RIGHT)\n for key in stats_variable_names(stats):\n try:\n tt.add_data([key, stats[key+\":min\"], stats[key+\":sum\"] / stats[':count'], stats[key+\":max\"]])\n except TypeError:\n tt.add_data([key, stats[key+\":min\"], \"\", stats[key+\":max\"]])\n print(tt.typeset(mode=tytable.TEXT))\n if census_checksum:\n print(\"Census checksum: {}\".format(census_checksum))\n print(\"\")\n if done:\n return True # had the keyboard abort\n return True", "def getFileCount(self) -> int:\n ...", "def test_get_table(self):\n my_conn = MySQL(*self.conn_params)\n inf_schema = my_conn.get_table('inf_schema') # GET TABLE example\n row_count = my_conn.engine.scalar(\n select([func.count('*')]).select_from(inf_schema)\n )\n # The select.columns parameter is not available in the method form of\n # select(), e.g. 
FromClause.select().\n # See https://docs.sqlalchemy.org/en/latest/core/selectable.html#\n # sqlalchemy.sql.expression.FromClause.select\n my_conn.engine.execute(\n select([inf_schema.c.table_name]).select_from(inf_schema))\n self.assertGreaterEqual(row_count, 100)", "def produce_mirna_unique_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.uniq.bam.mirbase_counts.txt\")", "def get_count()->int:\n stmt = sqlalchemy.select([sqlalchemy.func.count()]).select_from(_READING_TABLE)\n return execute_command(stmt).fetchall()[0][0]", "def count_records(self, conn, when='After insert') -> None:\n cur = conn.cursor()\n cur.execute(f\"SELECT COUNT(*) FROM {self.table}\")\n print(f\"{when}, the table {self.table} contains {cur.fetchall()[0][0]} records.\")", "def n_featured():\r\n sql = text('''select count(*) from featured;''')\r\n results = db.engine.execute(sql)\r\n for row in results:\r\n count = row[0]\r\n return count", "def hxlcount():\n run_script(hxlcount_main)", "def get_table_count(table_name):\n conn = get_connect()\n cursor = conn.execute(\"SELECT COUNT(*) FROM \" + table_name)\n count = cursor.fetchall()[0][0]\n conn.close()\n return count", "def dbcount(*args, enabled: bool=True, file: AnyStr=\"\", keyword: AnyStr=\"\", list: bool=True,\n maxdepth: int=0, quick: bool=True, reset: bool=True, spreadsheet: bool=True,\n **kwargs)->None:\n pass", "def analysis():\r\n data_frame = load_from_mysql('core', 'BDFMHQAA_D')\r\n data_frame.registerTempTable('business')\r\n gd = data_frame.select('AA03CSNO', 'AA08PRON')\r\n\r\n def merge_count(a, b):\r\n r = {}\r\n for p, c in a.items():\r\n if p in r:\r\n r[p] += c\r\n else:\r\n r[p] = c\r\n for p, c in b.items():\r\n if p in r:\r\n r[p] += c\r\n else:\r\n r[p] = c\r\n return r\r\n result = gd.map(lambda row: (row.AA03CSNO, {row.AA08PRON: 1})).reduceByKey(merge_count)\r\n pron_count = gd.map(lambda row: (row.AA08PRON, 1)).reduceByKey(lambda a, b: a + b)\r\n\r\n # result = gd.map(lambda row: (row.AA03CSNO, row.AA08PRON))\r\n print(result.take(10))\r\n print('----------------pron count-----------------')\r\n print(pron_count.collect())\r\n\r\n print(gd)", "def count_instances(tbl, col2count, colcounted):\n counted_ser = tbl[col2count].value_counts()\n counted_df = pd.DataFrame(counted_ser, columns=[colcounted]).reset_index()\n counted_df.rename(columns={'index':col2count},inplace=True)\n tbl = tbl.merge(counted_df,on=col2count)\n return tbl", "def count(self, args):\n counter = 0\n lists = args.split()\n\n if lists[0] not in HBNBCommand.class_check:\n print(\"** class doesn't exist **\")\n return\n\n objects = storage.all()\n for key in objects:\n name = key.split('.')\n if name[0] == lists[0]:\n counter += 1\n print(counter)", "def record_count(cur, conn):\n # get all created tables from db\n cur.execute(\"SELECT * FROM information_schema.tables WHERE table_schema='public'\")\n result = cur.fetchall()\n\n # create list of tables\n table_list = [table[2] for table in result]\n\n print('Verifying table record counts...')\n\n # get row count for each table\n for table_name in table_list:\n cur.execute(f\"SELECT COUNT(*) FROM {table_name}\")\n row_count = cur.fetchall()\n if row_count == 0:\n print(f\"WARNING, record count for {table_name} is zero '0'\")\n else:\n print(f\"SUCCESS, {row_count[0][0]} record count for {table_name} looks good!\")", "def test_upload_count(self):\n conn = initialize_connection()\n db = conn.picdb\n coll = db.images\n\n num = coll.count_documents({})\n\n self.assertEqual(num, 
72389)", "def countitems(self):\n count = 0\n sid = self.client.scannerOpen(self.table, '', ['f:s'])\n while 1:\n r = self.client.scannerGetList(sid, 1000)\n #r = self.client.scannerGet(sid)\n if not r: break\n count += len(r)\n logging.debug('%d %s', count, r[-1].row)\n self.scannerClose(sid)\n return count", "async def count(file: UploadFile = File(...), db: Session = Depends(get_db)) -> json:\n if file.content_type != \"application/json\":\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Invalid file extension. Please upload file in json format\",\n )\n data_byte = await file.read()\n data = json.loads(data_byte.decode(\"UTF-8\"))\n overall_count = SpacyNER.count(data)\n for index, all_counts in overall_count.items():\n for entity, count in all_counts.items():\n db_create = EntitiesCount(article_id=index, entities=entity, counts=count)\n db.add(db_create)\n db.commit()\n return {\"success\": True}", "def get_table_nb_lines(self, table):\n sql = \"SELECT COUNT(*) FROM \" + table + \";\"\n cur = self._connection.cursor()\n cur.execute(sql)\n res = cur.fetchall()\n cur.close()\n return res[0][0]", "def fileCount(self):\n pass", "def get_project_count(db):\n\n count = 0\n for element in db:\n count += 1\n return count", "def test_data_source_soaps_count_get(self):\n pass", "def recordCount(self, schema, table):\r\n r = self.fetchSqlRecords(\r\n \"SELECT count(*) FROM {}\".format(self.encodeTableName(schema, table)))\r\n return r[0][0]", "def _get_total_record_count(self, conn, tblname):\n with conn.cursor() as cursor:\n cursor.execute(sql.SQL('SELECT COUNT(*) FROM {0}').format(sql.Identifier(tblname)))\n return cursor.fetchone()[0]", "def write_count_table(filtered_counts, gene_names, ids_liver_header):\n with open(\"filtered_tcga_counts.tsv\", \"w\") as file:\n file.write(\"gene_id\\tgene_name\\t\" + \"\\t\".join(ids_liver_header) + \"\\n\")\n for gene_name, counts_line in zip(gene_names, filtered_counts):\n file.write(gene_name + \"\\t\" + \"\\t\" + \"\\t\".join(counts_line) + \"\\n\")", "def file_num(self):\n command = \"SELECT COUNT(id) FROM files;\"\n return self.c.execute(command)", "def n_count(category):\r\n sql = text('''\r\n WITH uniq AS (\r\n SELECT COUNT(app.id) FROM task, app\r\n LEFT OUTER JOIN category ON app.category_id=category.id\r\n WHERE\r\n category.short_name=:category\r\n AND app.hidden=0\r\n AND app.info LIKE('%task_presenter%')\r\n AND task.app_id=app.id\r\n GROUP BY app.id)\r\n SELECT COUNT(*) FROM uniq\r\n ''')\r\n\r\n results = db.engine.execute(sql, category=category)\r\n count = 0\r\n for row in results:\r\n count = row[0]\r\n return count", "def on_get(self, req, resp, table):\n user = req.context['user']\n engine = user_db_engine(user)\n query = \"SELECT COUNT(id) FROM {}\".format(table)\n with engine.new_session() as session:\n count = session.execute(query).fetchone()[0]\n\n resp.context['result'] = { 'result': 'ok', 'count': count }\n resp.status = falcon.HTTP_200", "def size_sqlite_table(cursor,table_name):\n #Inspired by code of Pieter Muller\n columnsQuery = \"PRAGMA table_info({0})\".format(table_name)\n cursor.execute(columnsQuery)\n numberOfColumns = len(cursor.fetchall())\n \n rowsQuery = \"SELECT Count() FROM ({0})\".format(table_name)\n cursor.execute(rowsQuery)\n numberOfRows = cursor.fetchone()[0]\n return({'nrow':numberOfRows,'ncol':numberOfColumns})", "def t_announceDbCount(self, *_):\n try: self.current_count=self.dbh.getRowCount()\n except: self.current_count=0\n \n self.dprint(\"* ratings_count: current 
count(%s)\" % self.current_count)\n self.pub(\"ratings_count\", self.current_count)", "def get_count(self, db_name, table_name):\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n # Check if table exists\n if engine.dialect.has_table(engine, table_name):\n sql = 'select count(*) from %s;' % table_name\n result = pd.read_sql(sql, connection, coerce_float=True).iloc[:, 0].values[0]\n else:\n print(table_name, \"does not exist\")\n result = None\n\n # Close connection\n connection.close()\n\n return result", "def do_count(self, args):\n args = shlex.split(args)\n if len(args) < 1:\n return\n _nb_objects = 0\n items = storage.all()\n for key in items:\n if items[key].__class__.__name__ == args[0]:\n _nb_objects += 1\n print(_nb_objects)", "def write_counts(self):\n\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_entries, 0x0040))\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_genres, 0x0010))\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_performers, 0x0010))\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_albums, 0x0010))\n self.db_file.write(\n struct.pack(\"<HH\", self.number_of_playlists, 0x0010))\n self.db_file.write(\n struct.pack(\"<HH\", 0x0001, 0x0014))\n\n self.db_file.write(\n b\"\\x01\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x02\\x00\\x00\\x00\\x00\\x00\")\n self.db_file.write(\n b\"\\x00\\x00\\x06\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\")", "def test_number_models_defined():\n tables, _ = run_mysql(\"SHOW TABLES;\")\n assert len(defined_models) == len(tables)", "def create_and_insert_dfs(connection: DBConnection) -> None:\n print(\"\\n[-] creating table dfs\", end=\"\")\n connection.execute(\"\"\"\n CREATE TABLE dfs AS\n SELECT term, COUNT(tf) AS df FROM tfs GROUP BY term\n \"\"\")\n print(\"\\r[+] creating table dfs\")", "def check_row_counts(self):\n\n df_len = len(self.df)\n sql = \"select count(*) from clock_staging;\"\n result = self.session.execute(sql).fetchone()[0]\n if df_len != result:\n raise ValueError(\n \"Count of Staging Table (clock_staging) does not match the CSV file!\"\n )", "def test_data_source_soaps_id_dynamic_datas_count_get(self):\n pass", "def get_all_counts(filename):\r\n column_keys, get_data = get_csv(filename)\r\n all_counts_dict = {}\r\n for key in column_keys[1:]:\r\n all_counts_dict[key] = {}\r\n\r\n for i,(k,v) in enumerate(get_data()):\r\n for key in column_keys[1:]:\r\n column = column_keys[1:].index(key)\r\n x = v[column]\r\n all_counts_dict[key][x] = all_counts_dict[key].get(x, 0) + 1\r\n return all_counts_dict", "def getRowCount(self) -> int:\n ...", "def count(self):\n with self.pdq:\n (count,)=self.pdq.cursor().execute('select count(*) from pdq').next()\n return count", "def test_count(self):\r\n assert self.table.objects.count() == 12\r\n\r\n q = self.table.objects(test_id=0)\r\n assert q.count() == 4", "def get_col_count(files: List[str],\n dialect: csv.Dialect) -> int:\n for record in csv.reader(fileinput.input(files[0]), dialect):\n field_cnt = len(record) -1\n break\n fileinput.close()\n return field_cnt", "def count_likes(db, filename):\n cur = db.cursor()\n sql = \"\"\"\n select count(filename) from likes where filename=?;\n \"\"\"\n cur.execute(sql, (filename,))\n like_sum = cur.fetchone()[0]\n return like_sum", "def get_datasets_count(request):\n organization_id = request.GET.get('organization_id', '')\n datasets_count = Organization.objects.get(\n pk=organization_id).import_records.all().distinct().count()\n\n return 
{'status': 'success', 'datasets_count': datasets_count}", "def test_total_throughput(self):\n self.query(\n \"CREATE TABLE foobar \"\n \"(id STRING HASH KEY, foo NUMBER, THROUGHPUT (1, 1))\"\n \"GLOBAL INDEX ('idx', id, foo, THROUGHPUT(1, 1))\"\n )\n desc = self.engine.describe(\"foobar\", refresh=True)\n self.assertEqual(desc.total_read_throughput, 2)\n self.assertEqual(desc.total_write_throughput, 2)", "def column_creator(path):\n if not os.path.exists(path+'tables'):\n os.makedirs(path+'tables')\n\n\n # Sequences\n if os.path.exists(path+'SEQ.txt'):\n with open(os.path.join(path+'SEQ.txt')) as f1, open(os.path.join(path+'tables/sequences_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n # Modifications\n if os.path.exists(path + 'modifications.txt'):\n\n with open(os.path.join(path+'modifications.txt')) as f1, open(os.path.join(path+'tables/modifications_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n # Spectrum identify:\n if os.path.exists(path + 'spectrum_identify.txt'):\n\n with open(os.path.join(path+'spectrum_identify.txt')) as f1, open(path+'tables/spectrum_ide_table.txt', 'a') as f3:\n lines1 = f1.read().count('\\n')\n f3.write(\"%s\\n%s\\n\" % (\"Spectrum Number\",lines1))\n f1.close()\n f3.close()\n\n if os.path.exists(path + 'spectrum_unidentify.txt'):\n with open(os.path.join(path+'spectrum_unidentify.txt')) as f2, open(path+'tables/spectrum_unide_table.txt', 'a') as f3:\n lines2 = f2.read().count('\\n')\n f3.write(\"%s\\n%s\\n\" % (\"Spectrum Number\",lines2))\n f2.close()\n f3.close()\n\n if os.path.exists(path+'taxonomy_identify.txt'):\n # Taxonomy ide:\n with open(os.path.join(path+'taxonomy_identify.txt')) as f1, open(os.path.join(path+'tables/taxonomy_ide_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n\n if os.path.exists(path + 'taxonomy_unidentify.txt'):\n # Taxonomy unide:\n with open(os.path.join(path+'taxonomy_unidentify.txt')) as f1, open(os.path.join(path+'tables/taxonomy_unide_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()", "def ask_tab_done():\n with CONNECTION:\n CURSOR.execute('SELECT COUNT(*) FROM marks')\n result = CURSOR.fetchone()[0]\n return str(result)", "def countPlayers():\n # gets connection to tournament database in conn object\n conn = connect()\n # gets the cursor to execute queries\n c = conn.cursor()\n # executes select with count aggregate function query number of players\n # in PLAYER table\n c.execute(\"SELECT COUNT(*) FROM PLAYER;\")\n # retreives the result in count variable\n count = c.fetchone() [0]\n # closes the connection to tournament database\n conn.close()\n # returns the number of players in PLAYER table\n return count", "def feature_count(self, f, cat):\n res = self.con.execute(\n 'select count from fc where feature=\"%s\" and category=\"%s\"'\n %(f, cat)).fetchone()\n \n if res == None:\n return 0\n else:\n return float(res[0])", "def read_counter(self, path):\n self.cursor.execute('SELECT * FROM \"counter\" WHERE \"fullpath\"=?', (path,))\n row = self.cursor.fetchone()\n count = 0\n if row != None : count = row[1]\n # print 'read_counter:', path, count\n return count", "def Count_Documents(db):\r\n \r\n count = 
db.Transaction.estimated_document_count()\r\n print(\"Number of documents in the database Transaction: \" + str(count) + \".\\n\")\r\n return count", "def summary():\n\n summary_result = session.query(Summary.Count, Summary.Total).all()\n session.close()\n\n # Return a List of Column Names (Sample Names)\n return jsonify(summary_result)", "def do_count(self, *args):\n count = 0\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n else:\n ''' Get a list of specified instances '''\n for key, obj in storage.all().items():\n key = key.split('.')\n if key[0] == args[0]:\n count += 1\n print(count)", "def calc_total_rows(self):\n #total_rows = len(self.file_list) - 1 # Minus header\n print('Total number of rows: ' + str(self.tot_rows))\n results.append('Total number of rows: ' + str(self.tot_rows))", "def countreadcolumns(prop_info):\n count = 0\n for prop in prop_info:\n if isinstance(prop['table_name'], str):\n count += 1\n else:\n count += len(prop['table_name'])\n return count", "def load_data(self, filename):\r\n #sqlcontext = SQLContext(self.sc)\r\n #df = sqlcontext.read.format('com.databricks.spark.csv').options(header='false', inferschema='true').load(filename)\r\n #df = sc.textFile(r\"C:\\Users\\mohan\\Downloads\\patches.csv\").map(lambda line: line.split(\",\"))\r\n #print (df.count())\r\n df = self.sc.textFile(filename).map(lambda line: line.split(\",\"))\r\n l = df.map(lambda w: [int(float(c)) for c in w]).zipWithIndex()\r\n return l\r\n raise NotImplementedError", "def num_keys_command():\n # Get Args needed for the command\n incident = demisto.args().get('id', get_investigation_id())\n # Search Collection counting matching incident_id\n cursor = COLLECTION.find({})\n count = 0\n for i in cursor:\n if incident in i:\n count += 1\n return f'The count of the key/value pairs for the incident - {str(count)}', {}, {}", "def load_luigi_stats(db_path, table):\n engine = create_engine('sqlite:///' + db_path)\n return pd.read_sql_table(table, engine)", "def peek_table (db, name):\n count = '''SELECT COUNT (*) FROM {table}'''.format (table=name)\n display (pandas.read_sql_query (count, db))\n peek = '''SELECT * FROM {table} LIMIT 5'''.format (table=name)\n display (pandas.read_sql_query (peek, db))", "def create_and_insert_d(connection: DBConnection) -> None:\n print(\"\\n[-] creating table d\", end=\"\")\n connection.execute(\"\"\"\n CREATE TABLE d AS\n SELECT COUNT(DISTINCT did) AS size FROM tfs\n \"\"\")\n print(\"\\r[+] creating table d\")", "def count_total():\r\n trans = transaction.begin()\r\n StatBookmarkMgr.count_total_bookmarks()\r\n trans.commit()", "def count():\r\n return Activation.query.count()", "def tblscans(self):\n sql = '''select to_char(value, 'FM99999999999999990') retvalue from \n v$sysstat where name = 'table scans (long tables)' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def get_plant_family_stats(db_path: str) -> int:\n return get_db_count(db_path, 'company_data.db', 'plant_families')", "def execQ2():\n # Put columns together\n frame = pan.DataFrame(data, columns=['Product', 'Amount'] )\n amount = frame.groupby(['Product']).count()\n return amount", "def gridfs_count(db, filtro={}, limit=2000, campos=[]):\n if filtro:\n if not campos:\n campos = [(key, 1) for key in filtro.keys()]\n logger.debug('integracao.gridfs_count filtro:%s hint:%s' %\n (filtro, campos))\n try:\n params 
= dict(filter=filtro,\n hint=campos)\n if limit:\n params['limit'] = limit\n print(params)\n return db['fs.files'].count_documents(**params)\n except OperationFailure as err:\n logger.error(err)\n params.pop('hint')\n return db['fs.files'].count_documents(**params)\n return db['fs.files'].count_documents({})", "def count(query):\n cursor = db.execute_sql(query)\n result = cursor.fetchone()[0]\n return result", "def analyze_tables(cur, conn):\n i = 0\n for query in count_table_queries:\n print(\" Analytical Table: {}..\".format(count_table_order[i]))\n cur.execute(query)\n results = cur.fetchone()\n\n for res in results:\n print(\" \", res)\n i = i + 1\n print(\" [Finished] \")", "def test_counts_to_reports(self):\n\n self.data = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/test_oobsplit.csv'))\n\n self.output = utils.counts_to_reports(self.data)\n\n # length of new dataframe should equal the sum of all counts of passed data\n self.assertEqual(len(self.output), self.data.Counts.sum())\n\n # simple check output is actually a dataframe\n self.assertTrue(isinstance(self.output, pd.DataFrame))\n\n # test unique IDs are produced as expected\n self.assertEqual(self.output.UID[0], 'E010117AN0')", "def __init__(self):\n self.num_counts = {}", "def query_one(\n self, table_name_users, table_name_activities, table_name_trackpoints\n ):\n\n query = (\n \"SELECT UserCount.NumUsers, ActivitiesCount.NumActivities, TrackpointCount.NumTrackpoints FROM \"\n \"(SELECT COUNT(*) as NumUsers FROM %s) AS UserCount,\"\n \"(SELECT COUNT(*) as NumActivities FROM %s) AS ActivitiesCount,\"\n \"(SELECT COUNT(*) as NumTrackpoints FROM %s) AS TrackpointCount\"\n )\n\n self.cursor.execute(\n query % (table_name_users, table_name_activities,\n table_name_trackpoints)\n )\n rows = self.cursor.fetchall()\n print(tabulate(rows, headers=self.cursor.column_names))\n return rows", "def app_count_data():\n stmt = db.session.query(Appsdata).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n #df = pd.read_csv(\n # 'googleplaystore.csv')\n\n #print(\"df:\")\n #print(df)\n\n reduced_df = df.loc[: , ['Category' , 'Installs']]\n #print(\"reduced_df:\")\n #print(reduced_df)\n\n reduced_df['Installs'] = reduced_df['Installs']\n grouped_reduced_df = reduced_df.groupby(['Category']).count()\n # print(\"grouped:\")\n #print(list(grouped_reduced_df.index))\n\n category_list = list(grouped_reduced_df.index)\n installs_count = list(grouped_reduced_df['Installs'])\n\n # Format the data for Plotly\n plot_trace = {\n \"x\": category_list,\n \"y\": installs_count,\n \"type\": \"bar\"\n \n }\n return jsonify(plot_trace)", "def report(param):\n featurecount_dir = param['working_dir']+'report/featureCount/'\n if not os.path.exists(featurecount_dir):\n os.makedirs(featurecount_dir)\n \n #report only if there were actually results\n out_file = param['working_dir']+'deliverables/featureCount_raw_counts.txt'\n if os.path.exists(out_file):\n param['report'].write('<center><br><br><h2>FeatureCount statistics</h2>')\n table = process_stat_files(param)\n MODULE_HELPER.create_sub_report(param, out_file, table, 'featureCount', 'FeatureCount') \n MODULE_HELPER.plot_count_overview(param, 'featureCount', table)", "def count(self):\n return self.query.count(with_limit_and_skip = True)" ]
[ "0.6216667", "0.59590936", "0.5957887", "0.5952389", "0.5868071", "0.58158195", "0.57693267", "0.57693267", "0.5755721", "0.5700496", "0.56974876", "0.5680915", "0.56658834", "0.5662107", "0.56482613", "0.56469244", "0.5637406", "0.559692", "0.5541019", "0.55354065", "0.5535229", "0.5535229", "0.5535229", "0.5535229", "0.5512007", "0.54988885", "0.54971683", "0.54693854", "0.54447764", "0.54443556", "0.544142", "0.5438055", "0.5432542", "0.5409049", "0.5398155", "0.5389348", "0.537032", "0.53542984", "0.5347697", "0.53205", "0.5317878", "0.53125113", "0.53036", "0.5301163", "0.52950054", "0.52927166", "0.5268007", "0.52435046", "0.52339375", "0.5228439", "0.52234864", "0.52205354", "0.5209973", "0.52001405", "0.51980025", "0.5191834", "0.51841253", "0.51777047", "0.5176894", "0.5173697", "0.51683694", "0.5164598", "0.51644045", "0.51590514", "0.5151906", "0.5147145", "0.51418334", "0.5137472", "0.5134651", "0.5133006", "0.51319265", "0.5129623", "0.5123622", "0.51114666", "0.51071364", "0.5104732", "0.5095722", "0.50947315", "0.5081645", "0.50719136", "0.5068008", "0.5055212", "0.5052952", "0.5050863", "0.5047214", "0.50455415", "0.50441784", "0.504113", "0.5030207", "0.5029378", "0.502105", "0.5016641", "0.50148445", "0.5014411", "0.50121737", "0.5010892", "0.5004661", "0.49953768", "0.4995333", "0.49897102" ]
0.7532327
0
reformat attr tables into a dictionary for rapid alignment of attr table with tracking_id
def reformat_attrTable( self):
    #format into a dictionary of rows for quick aligning with the tracking_id
    if self.attrTable:
        attrTable = self.attrTable[:];
    else:
        attrTable = [];
    attrTable_dict = {};
    for row in attrTable:
        attrTable_dict[row['tracking_id']] = row;
    return attrTable_dict;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _process_attrs(attrs):\n new_attrs = OrderedDict()\n for attr in attrs:\n col = attr\n if isinstance(attr, tuple):\n col, attr = attr\n # special cases\n if attr == 'class_name':\n attr = '__class__.__name__'\n if attr == 'repr':\n attr = repr\n new_attrs[col] = attr\n\n return new_attrs", "def transform_attributes(attrs):\n transformed = {}\n for key, value in attrs.items():\n if key in [\"raw_message\", \"text\"]:\n transformed[\"raw_content\"] = value\n elif key in [\"diaspora_handle\", \"sender_handle\", \"author\"]:\n transformed[\"handle\"] = value\n elif key == \"recipient_handle\":\n transformed[\"target_handle\"] = value\n elif key == \"parent_guid\":\n transformed[\"target_guid\"] = value\n elif key == \"first_name\":\n transformed[\"name\"] = value\n elif key == \"image_url\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"large\"] = value\n elif key == \"image_url_small\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"small\"] = value\n elif key == \"image_url_medium\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"medium\"] = value\n elif key == \"tag_string\":\n transformed[\"tag_list\"] = value.replace(\"#\", \"\").split(\" \")\n elif key == \"bio\":\n transformed[\"raw_content\"] = value\n elif key == \"searchable\":\n transformed[\"public\"] = True if value == \"true\" else False\n elif key == \"target_type\":\n transformed[\"entity_type\"] = DiasporaRetraction.entity_type_from_remote(value)\n elif key == \"remote_photo_path\":\n transformed[\"remote_path\"] = value\n elif key == \"remote_photo_name\":\n transformed[\"remote_name\"] = value\n elif key == \"status_message_guid\":\n transformed[\"linked_guid\"] = value\n transformed[\"linked_type\"] = \"Post\"\n elif key in BOOLEAN_KEYS:\n transformed[key] = True if value == \"true\" else False\n elif key in DATETIME_KEYS:\n try:\n # New style timestamps since in protocol 0.1.6\n transformed[key] = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n # Legacy style timestamps\n transformed[key] = datetime.strptime(value, \"%Y-%m-%d %H:%M:%S %Z\")\n elif key in INTEGER_KEYS:\n transformed[key] = int(value)\n else:\n transformed[key] = value or \"\"\n return transformed", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def transform(attrs: dict) -> dict:\n\n pass", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def _checkTableAttr(self, attrs, prefix):\n if not attrs:\n return {}\n\n result = {}\n s = [] # we collect synthesized style in s\n for key, val in attrs.items():\n # Ignore keys that don't start with prefix\n if prefix and key[:len(prefix)] != prefix:\n continue\n key = key[len(prefix):]\n val = val.strip('\"')\n # remove invalid attrs from dict and synthesize style\n if key == 'width':\n s.append(\"width: %s\" % val)\n elif key == 'height':\n s.append(\"height: 
%s\" % val)\n elif key == 'bgcolor':\n s.append(\"background-color: %s\" % val)\n elif key == 'align':\n s.append(\"text-align: %s\" % val)\n elif key == 'valign':\n s.append(\"vertical-align: %s\" % val)\n # Ignore unknown keys\n if key not in self._allowed_table_attrs[prefix]:\n continue\n result[key] = val\n st = result.get('style', '').split(';')\n st = '; '.join(st + s)\n st = st.strip(';')\n st = st.strip()\n if not st:\n try:\n del result['style'] # avoid empty style attr\n except:\n pass\n else:\n result['style'] = st\n #logging.debug(\"_checkTableAttr returns %r\" % result)\n return result", "def _convert_table_to_dict(self, data_table):\n column_names = ['star_name', 'distance', 'brightness', 'luminosity']\n stars = {}\n for line in data_table:\n stars[line[0]] = {column_names[i] : line[i] for i in range(1, len(column_names))}\n return stars", "def aggregate_by_primary_attribute(table):\n result = {}\n for row in table:\n for attribute_to_aggregate_by in row[1].split(','):\n attribute_to_aggregate_by.strip()\n attribute_data = row[0]\n if attribute_to_aggregate_by not in result:\n result[attribute_to_aggregate_by] = [attribute_data]\n else:\n result[attribute_to_aggregate_by] += [attribute_data]\n return result", "def convert_attributes(cls, attrs):\n return {}", "def _prepare_links_table(self):\n\n links_tbl = OrderedDict()\n for colname in itertools.islice(self._pinfos, 1, None):\n links_tbl[colname] = {}\n links_tbl[colname][\"name\"] = f\"{colname}\"\n fname = colname.replace(\"%\", \"_pcnt\") + \".html\"\n links_tbl[colname][\"fname\"] = fname\n links_tbl[colname][\"hlink\"] = f\"<a href=\\\"{fname}\\\">{colname}</a>\"\n\n return links_tbl", "def to_dict(self):\n d = {}\n i = 0\n for entry in self.entries:\n d[i] = {}\n attributes = self.get_attribute_list()\n print (attributes)\n for data in attributes:\n d[i][data] = entry.__getattribute__(data)\n i = i + 1\n return d", "def attributes_metadata(self):\n\n attribute_meta = collections.defaultdict(dict)\n\n for attribute in self.attributes:\n attribute_meta[attribute.name]['valuemap'] = attribute.valuemap\n attribute_meta[attribute.name]['qualifiers'] = attribute.qualifiers\n\n return dict(attribute_meta)", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def get_attributes(self):\n \n retdict = {}\n retdict['s'] = str(self.s)\n if self.t != None:\n retdict['t'] = str(self.t)\n retdict['a'] = str(self.a)\n retdict['b'] = str(self.b)\n retdict['c'] = str(self.c)\n retdict['d'] = str(self.d)\n return retdict", "def avail_table_to_dict(avail_data):\n avail_target = avail_data[\"TARGETID\"]\n avail_loc = avail_data[\"LOCATION\"]\n avail = dict()\n for lid, tgid in zip(avail_loc, avail_target):\n if lid in avail:\n avail[lid].append(tgid)\n else:\n avail[lid] = list([tgid])\n avail = {f: np.array(av) for f, av in avail.items()}\n return avail", "def _createAttributeFormattingMap(self, scanf_list, reformat=True):\n\n order = []\n scanf_map = {}\n for entry in scanf_list:\n\n # grab attribute\n attribute = re.split('\\s', entry)[0]\n\n # add to order\n if attribute.startswith('_') or (not attribute in order):\n order.append(attribute)\n\n # reformat entry since sscanf doesn't support %g\n if reformat:\n entry = entry.replace('%g', '%f')\n\n # make format entry into list if multiple formats exist\n if attribute in scanf_map:\n formats = scanf_map[attribute]\n if not isinstance(formats, list):\n 
scanf_map[attribute] = [formats]\n scanf_map[attribute].append(entry)\n else:\n scanf_map[attribute] = entry\n\n return scanf_map, order", "def tables() -> dict[str, str]:\n return {\n \"land_use\": \"zone_id\",\n \"tours\": \"tour_id\",\n \"trips\": \"trip_id\",\n \"persons\": \"person_id\",\n \"households\": \"household_id\",\n }", "def _attrs_map(self) -> \"dict[int, str]\":\n return {i: attr.name for i, attr in enumerate(self._attrs())}", "def make_category_tables(category_table):\n category2label = {}\n label2category = {}\n for item in category_table.itertuples():\n category_id = item[0]\n label_id = item[4]\n category2label[category_id] = label_id\n label2category[label_id] = category_id\n return category2label, label2category", "def parse_attributes(self, attr):\n result = {}\n annotations = []\n # Sanitize and split attributes up\n split_attr = attr.strip(' \\t\\n;').split(';')\n for pair in split_attr:\n splitpair = pair.split('=')\n if len(splitpair) != 2:\n continue\n if splitpair[0] == \"ID\":\n result['identifier'] = splitpair[1]\n elif splitpair[0] == \"Name\":\n result['name'] = splitpair[1]\n elif splitpair[0] == \"Parent\":\n result['parent_id'] = splitpair[1]\n elif splitpair[0] == \"Dbxref\" or splitpair[0] == \"Ontology_term\":\n annotations.append(splitpair)\n # Make sure we found an ID\n if \"identifier\" not in result:\n return {}\n # Add annotations if we found any\n if annotations:\n result[\"annotations\"] = annotations\n return result", "def attributes(table,attrs): \n if isinstance(table,Table):\n table.html_attributes = attrs\n return table", "def insertable_dict(self):\n\n d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs if p.key not in ('table', 'stats', '_codes')}\n\n x = {('c_' + k).strip('_'): v for k, v in d.items()}\n\n return x", "def alignAndReformat_countFPKMattrTables(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n #reformat\n countTable_flat = self.reformat_countTable(\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,);\n fpkmTable_flat = self.reformat_fpkmTable(\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,);\n attrTable_dict = self.reformat_attrTable();\n #align\n countAndFpkmTable_aligned = [];\n for row in countTable_flat[:]:\n row.update(attrTable_dict[row['tracking_id']]);\n countAndFpkmTable_aligned.append(row);\n for row in fpkmTable_flat[:]:\n row.update(attrTable_dict[row['tracking_id']]);\n countAndFpkmTable_aligned.append(row);\n return countAndFpkmTable_aligned;", "def as_dict(self):\n return {c.key: getattr(self, c.key)\n for c in inspect(self).mapper.column_attrs}", "def to_dict(self):\n columns = self.__mapper__.column_attrs.keys()\n result = {}\n for column in columns:\n result[column] = getattr(self, column)\n return result", "def to_dict(self):\n columns = self.__mapper__.column_attrs.keys()\n result = {}\n for column in columns:\n result[column] = getattr(self, column)\n return result", "def to_dict(self):\n columns = self.__mapper__.column_attrs.keys()\n result = {}\n for column in columns:\n result[column] = getattr(self, column)\n return result", "def to_dict(self):\n columns = self.__mapper__.column_attrs.keys()\n result = {}\n for column in columns:\n result[column] = getattr(self, column)\n return result", "def to_dict(self):\n columns = self.__mapper__.column_attrs.keys()\n result = {}\n for column in columns:\n result[column] = getattr(self, column)\n return result", "def 
parse_distmat_to_dict(table):\r\n\r\n col_headers, row_headers, data = parse_matrix(table)\r\n assert(col_headers == row_headers)\r\n\r\n result = defaultdict(dict)\r\n for (sample_id_x, row) in zip(col_headers, data):\r\n for (sample_id_y, value) in zip(row_headers, row):\r\n result[sample_id_x][sample_id_y] = value\r\n return result", "def gfa_table_to_dict(gfa_data):\n gfa_target = gfa_data[\"TARGETID\"]\n gfa_loc = gfa_data[\"GFA_LOC\"]\n gfa_gmag = gfa_data[\"GAIA_PHOT_G_MEAN_MAG\"]\n gfa = dict()\n for lid, tgid, mag in zip(gfa_loc, gfa_target,gfa_gmag):\n print(zip(gfa_loc, gfa_target,gfa_gmag))\n if lid in gfa:\n gfa[lid].append(mag)\n else:\n gfa[lid] = list([mag])\n gfa = {f: np.array(av) for f, av in gfa.items()}\n return gfa", "def object_as_dict(cls, obj):\n return {c.key: getattr(obj, c.key) for c in inspect(obj).mapper.column_attrs}", "def _build_attributes(self):\n\n # We might rebuild the program because of snippets but we must\n # keep already bound attributes\n\n dtype = []\n for (name,gtype) in self.all_attributes:\n if name not in self._attributes.keys():\n attribute = Attribute(self, name, gtype)\n else:\n attribute = self._attributes[name]\n\n self._attributes[name] = attribute\n dtype.append(attribute.dtype)", "def __repr__(self):\n return '\\t'.join([str(self.seqid), \n str(self.source), \n str(self.type), \n str(self.start), \n str(self.end), \n str(self.score), \n str(self.strand), \n str(self.phase), \n str(self.attributes_str)])\n\n\tdef addAttr(self, attr_key, attr_val, replace=False, attr_pos=0):\n\t\t\"\"\"\n\t\tAdd key-value pair to the GFF3 attribute column, at the\n beginning of the list by default. If the key exists then the\n value is concatenated with the current value separated by a\n comma, unless replace=True in which case the existing value is\n replaced.\n\t\t\"\"\"\n\t\tif attr_key in self.attributes:\n # replace the current value of this key with then new value\n\t\t\tif replace:\n\t\t\t\tdelAttr(attr_key)\n\t\t\t\tself.attributes[attr_key] = attr_val\n\t\t\t\tself.attributes_order.insert(attr_pos, attr_key)\n\t\t\t\tself.refreshAttrStr()\n # this key already exists in the attributes of the GFF3 file\n # add this value to the existing key's values with commas\n # separating values\n\t\t\telse:\n\t\t\t\tself.attributes[attr_key] = '{0},{1}'.format(\n self.attributes[attr_key], \n attr_val)\n\t\t\t\tself.refreshAttrStr()\n\t\telse:\n\t\t\tself.attributes[attr_key] = attr_val\n\t\t\tself.attributes_order.insert(attr_pos, attr_key)\n\t\t\tself.refreshAttrStr()\n\n\tdef delAttr(self, attr_key):\n\t\t\"\"\"Deletes attribute.\"\"\"\n\t\tdel self.attributes[attr_key]\n\t\tself.attributes_order.pop(self.attributes_order.index(attr_key))\n\t\tself.refreshAttrStr()", "def prepare_data(groups):\n all_dicts = []\n for idx, group in groups:\n res_dict = {'organism': group.organism.iloc[0]}\n for g_idx, row in group.iterrows():\n if pd.notna(row.label):\n res_dict[row.cmp_name] = {'label': row.label, 'mic': row.MIC}\n else:\n res_dict[row.cmp_name] = {'label': '', 'mic': row.MIC}\n all_dicts.append(res_dict)\n return all_dicts", "def get_table_attributes(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n table_attributes = dict(primary_attributes=[], secondary_attributes=[])\n for attribute_name, attribute_info in getattr(schema_virtual_module,\n table_name).heading.attributes.items():\n if attribute_info.in_key:\n 
table_attributes['primary_attributes'].append((\n attribute_name,\n attribute_info.type,\n attribute_info.nullable,\n attribute_info.default,\n attribute_info.autoincrement\n ))\n else:\n table_attributes['secondary_attributes'].append((\n attribute_name,\n attribute_info.type,\n attribute_info.nullable,\n attribute_info.default,\n attribute_info.autoincrement\n ))\n\n return table_attributes", "def map_attributes(order: dict) -> dict:\n map_dict = {\n \"has_batteries\": FactoryMapping.cast_str_to_bool,\n \"has_glow\": FactoryMapping.cast_str_to_bool,\n \"has_lactose\": FactoryMapping.cast_str_to_bool,\n \"has_nuts\": FactoryMapping.cast_str_to_bool,\n \"min_age\": int,\n \"num_rooms\": int,\n \"num_sound\": int,\n \"pack_size\": int,\n \"dimensions\": lambda x: float(x.replace(\",\", '.')),\n \"spider_type\": SpiderType.map_str_to_enum,\n \"colour\": Colours.map_str_to_enum,\n \"variety\": ToffeeVariety.map_str_to_enum,\n \"stuffing\": Stuffing.map_str_to_enum,\n \"size\": Size.map_str_to_enum,\n \"fabric\": Fabric.map_str_to_enum\n }\n for key, value in map_dict.items():\n if key in order:\n order[key] = value(order[key])\n return order", "def sa_to_dict(obj):\n return {c.key: getattr(obj, c.key)\n for c in inspect(obj).mapper.column_attrs}", "def map_cols_to_attr(self):\n ## this is from the base class:\n ## for attr, label in zip(self.attr_names, self.labels):\n ## col_ind = self.col_inds[label]\n ## if len(self.data) > 0:\n ## setattr(self, attr, self.data[:,col_ind])\n #\n # hard coding based on what I know about saleae files:\n self.t = self.data[:,0]#.astype(float)\n nr, nc = self.data.shape\n self.num_cols = nc-1\n \n for i in range(0,self.num_cols):\n attr = 'ch_%i' % i\n j = i+1\n setattr(self, attr, self.data[:,j])#.astype(float))", "def getAttrsDict(attrs):\r\n attrsDict = json.loads(re.sub('/\\\"(?!(,\\s\"|}))','\\\\\"',attrs).replace(\"\\t\",\" \").replace(\"\\n\",\" \")) if len(attrs)>0 else {}\r\n return attrsDict", "def columnar(table: list[dict[str, str]]) -> dict[str, list[str]]:\n result: dict[str, list[str]] = {}\n keys = table[0].keys()\n for key in keys:\n result[key] = column_values(table, key)\n return result", "def _prop_2_classes(properties: dict) -> dict:\n prop_2_classes = {}\n for record in properties:\n if not pd.isnull(record[\"Properties\"]):\n props = record[\"Properties\"].strip().split(\",\")\n for pr in props:\n prop_2_classes.setdefault(pr.strip(),[]).append(record[\"Attribute\"])\n \n return prop_2_classes", "def dict(self) -> dict():\n\n dict_reg_hive = {}\n\n for _attribute in self.attributes.__dict__.items():\n if isinstance(_attribute[1], str):\n if not True in [_attribute[1].startswith(prefix) for prefix in ['<', 'providers.', 'None']]:\n _attribute_value = getattr(self, _attribute[1])\n dict_reg_hive.update({_attribute[1]: _attribute_value})\n\n return dict_reg_hive", "def __convertAttributes__(xml_source):\n attributes = {}\n for attrName, attrValue in xml_source.attributes.items():\n attributes[attrName] = attrValue\n return attributes", "def df2dict(df):\n attr_list = list(df.columns)\n res_dict = {}\n for attr in attr_list:\n res_dict[attr.lower()] = list(df[attr])\n\n return res_dict", "def prepare_attributes(attributes):\n new_attributes = []\n for attribute in attributes:\n new_attributes.append(\"e_\" + attribute)\n return new_attributes", "def clean_attributes(self):\n attrs = {}\n\n # Only fetch the fields we need.\n for a in self.attributes.only('name', 'value', 'attribute').iterator():\n if a.attribute.multi:\n if a.name not in 
attrs:\n attrs[a.name] = []\n attrs[a.name].append(a.value)\n else:\n attrs[a.name] = a.value\n self._attributes_cache = attrs # Cache the attributes\n\n return attrs", "def record_dict(self):\n return {p.key: getattr(self, p.key) for p in self.__mapper__.attrs}", "def get_columns_dict(table, replace):\n # 0 is name, 1 is id\n if type(table.index) == pd.MultiIndex:\n colcount = 1 + len(table.index[0])\n else:\n colcount = 2\n cols = {}\n for c in table.columns:\n c_repres = \",\".join(c)\n if \"Filtergroups\" not in c:\n cols[colcount] = replace_in_str(str(c_repres), replace)\n colcount = colcount + 1\n return cols", "def normalize_keys(df: pd.DataFrame) -> None:\n renames = {'tripUpdate_trip_tripId': 'trip_id', 'tripUpdate_trip_startDate': 'start_date',\n 'tripUpdate_trip_directionId': 'direction_id', 'tripUpdate_trip_routeId': 'route_id',\n 'tripUpdate_trip_scheduleRelationship': 'schedule_relationship',\n 'tripUpdate_trip_startTime': 'start_time',\n 'tripUpdate_timestamp': 'timestamp', 'tripUpdate_vehicle_id': 'vehicle_id',\n 'stopSequence': 'stop_sequence', 'stopId': 'stop_id',\n 'scheduleRelationship': 'schedule_relationship2',\n 'vehicle_trip_tripId': 'trip_id', 'vehicle_trip_scheduleRelationship': 'schedule_relationship',\n 'vehicle_timestamp': 'timestamp', 'vehicle_vehicle_id': 'vehicle_id',\n 'vehicle_trip_startTime': 'start_time', 'vehicle_trip_startDate': 'start_date',\n 'vehicle_trip_routeId': 'route_id', 'vehicle_trip_directionId': 'direction_id',\n 'tripUpdate_stopTimeUpdate_stopSequence': 'stop_sequence',\n 'tripUpdate_stopTimeUpdate_stopId': 'stop_id',\n 'tripUpdate_stopTimeUpdate_arrival_delay': 'arrival_delay',\n 'tripUpdate_stopTimeUpdate_arrival_time': 'arrival_time',\n 'tripUpdate_stopTimeUpdate_departure_delay': 'departure_delay',\n 'tripUpdate_stopTimeUpdate_departure_time': 'departure_time',\n 'tripUpdate_stopTimeUpdate_arrival_uncertainty': 'arrival_uncertainty',\n 'tripUpdate_stopTimeUpdate_departure_uncertainty': 'departure_uncertainty',\n 'alert_activePeriod_start': 'period_start', 'alert_activePeriod_end': 'period_end',\n 'alert_informedEntity_routeId': 'route_id', 'alert_informedEntity_stopId': 'stop_id',\n 'alert_informedEntity_trip_tripId': 'trip_id',\n 'alert_informedEntity_trip_scheduleRelationship': 'schedule_relationship',\n 'alert_headerText_translation_text': 'header_text',\n 'alert_descriptionText_translation_text': 'description_text',\n }\n df.rename(columns=renames, inplace=True)", "def to_dict(\n self,\n attributes: Iterable[str] = (\"xyz\", \"viewdir\", \"imgsz\", \"f\", \"c\", \"k\", \"p\"),\n ) -> Dict[str, tuple]:\n return {key: helpers.numpy_to_native(getattr(self, key)) for key in attributes}", "def __get_attr_format (self, attrs):\r\n format = { \r\n 'editor': None,\r\n 'min': None,\r\n 'max': None,\r\n 'step': None,\r\n 'subtype': None,\r\n 'flags': None,\r\n 'enums': None\r\n }\r\n\r\n for attr in attrs: \r\n attr_type = attr[\"type\"]\r\n if \"editor\" == attr_type:\r\n format['editor'] = attr[\"value\"] \r\n if \"min\" == attr_type:\r\n format['min'] = attr[\"value\"] \r\n if \"max\" == attr_type:\r\n format['max'] = attr[\"value\"] \r\n if \"default\" == attr_type:\r\n format['default'] = attr[\"value\"] \r\n if \"step\" == attr_type:\r\n format['step'] = attr[\"value\"]\r\n if \"subtype\" == attr_type:\r\n format['subtype'] = attr[\"value\"]\r\n if \"flags\" == attr_type:\r\n format['flags'] = attr['value']\r\n if \"enums\" == attr_type:\r\n format['enums'] = attr['value']\r\n\r\n return format", "def pairwise_info2tags(pairwise: 
PairwiseInfo) -> dict:\n\n rv = {\n canon_pairwise_tag(tag): raw(pairwise.metadata[tag]) for tag in pairwise.metadata or {}\n }\n rv['~their_did'] = pairwise.their_did\n rv['~their_verkey'] = pairwise.their_verkey\n rv['~my_did'] = pairwise.my_did\n rv['~my_verkey'] = pairwise.my_verkey\n return rv", "def make_to_dict(item, include_timestamp):\n return {\n '%s:%s' % (cell.family, cell.qualifier): (cell.value, cell.timestamp) if include_timestamp else cell.value\n for cell in item\n }", "def makeTargetFieldsDict(tgt_fields):\n global RES, NRES, HOTEL\n out_dict = {}\n for fld in tgt_fields:\n use, suffix = fld.split(\"_SF_\")\n if use in RES:\n act_field = \"RES\"\n elif use in NRES:\n act_field = \"JOB\"\n elif use in HOTEL:\n act_field = \"HOTEL\"\n else:\n # This is an untracked ause\n continue\n share_field = \"shr_{}\".format(use)\n sqft_field = \"{}_sqft\".format(use)\n out_dict[fld] = (act_field, share_field, sqft_field)\n return out_dict", "def applyAttrs(data, attrs):\n\tassert(len(data[0]) == len(attrs) + 1)\n\tnum_attrs = len(attrs)\n\tnum_instances = len(data)\n\n\tout = [None] * len(data)\n\tfor row in range(num_instances):\n\t\tinstance = data[row]\n\t\tout[row] = [instance[0]] + ['?' if instance[i+1] == '?' else attrs[i]['vals'][int(instance[i+1])] for i in range(num_attrs)]\n\n\treturn out", "def dictify(df):\n return {str(k): v for k, v in df.items()}", "def _split_morph_attrs(attrs: dict) -> Tuple[dict, dict]:\n other_attrs = {}\n morph_attrs = {}\n for k, v in attrs.items():\n if k in \"_\" or k in IDS.keys() or k in IDS.values():\n other_attrs[k] = v\n else:\n morph_attrs[k] = v\n return other_attrs, morph_attrs", "def _nx_lookup_attrs(to_set, record, graph):\n attrs = {}\n for i, attr in enumerate(to_set):\n key = attr.get(\"key\", i)\n value = attr.get(\"value\", \"\")\n if not value:\n lookup = attr.get(\"value_lookup\", \"\")\n if lookup:\n alias, lookup_key = lookup.split(\".\")\n node = record[alias]\n value = graph.node[node].get(lookup_key, \"\")\n attrs[key] = value\n return attrs", "def load_attributes():\n\n # <attribute_id> <attribute_name>\n attributes_file = open(PROJECT_ROOT +'/data/attributes.txt').readlines()\n attributes_file = [i.strip().split() for i in attributes_file]\n\n # <certainty_id> <certainty_name>\n certainties_file = open(PROJECT_ROOT +'/data/CUB_200_2011/attributes/certainties.txt').readlines()\n certainties_file = [i.strip().split() for i in certainties_file]\n\n # <image_id> <attribute_id> <is_present> <certainty_id> <time>\n labels_file = open(PROJECT_ROOT +'/data/CUB_200_2011/attributes/image_attribute_labels.txt').readlines()\n labels_file = [i.strip().split() for i in labels_file]\n\n attribute_ids = {}\n for i in attributes_file:\n attribute_ids[i[1]] = int(i[0])\n\n certainty_ids = {}\n for i in certainties_file:\n certainty_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels_file:\n label_ids[(int(i[0]), int(i[1]))] = list(map(lambda x:int(float(x)), i[2:]))\n\n return attribute_ids, certainty_ids, labels_file, label_ids", "def generate_all_attr_change(self):\n return {\n k: self.generator(spec) for k, spec in self.attribute_spec.items()\n }", "def updateAnnoByIdDictFromMeta(syn,idDict,metaDf,refCol,fileExts):\n for key in idDict:\n print \"updating annotaion values for key: %s\" % key\n for synId in idDict[key]:\n print \"> %s\" %synId\n temp = syn.get(synId, downloadFile = False)\n exts = ')|('.join(fileExts)\n exts = r'(' + exts + ')'\n tempName = re.sub(exts,\"\",temp.name)\n row = df.loc[df[refCol] == tempName]\n 
temp[key] = map(str,row[key])[0]\n temp = syn.store(temp,forceVersion = False)\n print \"\"", "def _all_meta(self):\n\t\treturn {meta.key: self.type_cast(meta.value) for meta in self.meta_set.all()}", "def convert_attributes(cls, attrs):\n if attrs.get_int(\"axis\") != 1:\n raise RuntimeError(\n f\"Unsupported axis {attrs.get_int('axis')} in operator relay lrn operator. \"\n f\"Only axis = 1 is supported by Onnx.\"\n )\n\n return {\"alpha\": attrs.alpha, \"beta\": attrs.beta, \"bias\": attrs.bias, \"size\": attrs.size}", "def get_indices_convert_dict(fn):\n pdb_inp = pdb.input(file_name=fn)\n pdb_hierarchy = pdb_inp.construct_hierarchy()\n \n newids = OrderedDict((atom.id_str(), idx) for (idx, atom) in enumerate(pdb_hierarchy.atoms()))\n oldids= OrderedDict((atom.id_str(), idx) for (idx, atom) in enumerate(pdb_inp.atoms()))\n \n return {'p2a': np.array([newids[atom.id_str()] for atom in pdb_inp.atoms()]),\n 'a2p': np.array([oldids[atom.id_str()] for atom in pdb_hierarchy.atoms()])}", "def attribute_to_dict(attr: onnx.AttributeProto) -> Dict:\n ret = {}\n for a in attr:\n value = get_attribute_value(a)\n if isinstance(value, bytes):\n value = str(value, 'utf-8')\n ret[a.name] = value\n return ret", "def _parse_attr(self, attr_proto):\n attrs = {}\n for key, value in attr_proto.items():\n attrs[key] = self._get_attr(value)\n\n return attrs", "def _create_feature_dict(feature_table_file) -> dict:\n feature_dict = dict()\n with open(feature_table_file, \"r\") as feature_table:\n csv_in = csv.reader(feature_table, delimiter=\"\\t\")\n\n header = [x.lower() for x in next(csv_in)]\n accession_idx = header.index(\"accession\")\n type_idx = header.index(\"type\")\n type_specific_idx = header.index(\"type_specific\")\n description_idx = header.index(\"description\")\n identifier = 2\n for line in csv_in:\n if line[accession_idx] not in feature_dict:\n feature_dict[line[accession_idx]] = dict()\n\n if line[1] not in feature_dict[line[accession_idx]]:\n feature_dict[line[accession_idx]][line[type_idx]] = []\n\n # Insert feature entry\n feature_dict[line[0]][line[1]].append(\n (line[type_specific_idx].split(\",\"), line[description_idx], str(identifier))\n )\n identifier += 1\n\n return feature_dict", "def get_dictionary(self, verbosity='all'):\n return dict([\n # This maps the model to its columns except for id, for which the\n # database mapping and python mapping differ.\n (c.name, str(getattr(self, c.name if c.name != 'id' else 'id_')))\n for c in self.__table__.columns\n ]) if verbosity == 'all' else {}", "def _dict_metadata(metadata):\n new_metadata = copy.deepcopy(metadata)\n tables = new_metadata['tables']\n if isinstance(tables, dict):\n new_metadata['tables'] = {\n table: meta\n for table, meta in tables.items()\n if meta.pop('use', True)\n }\n return new_metadata\n\n new_tables = dict()\n for table in tables:\n if table.pop('use', True):\n new_tables[table.pop('name')] = table\n\n fields = table['fields']\n new_fields = dict()\n for field in fields:\n new_fields[field.pop('name')] = field\n\n table['fields'] = new_fields\n\n new_metadata['tables'] = new_tables\n\n return new_metadata", "def create_sqlalchemy_mapperproperties_from_dbfields(cls,modeltable):\n allprops = {}\n #\n for field in cls.fieldlist:\n props = field.create_sqlalchemy_mapperproperties(cls,modeltable)\n if (props!=None):\n allprops.update(props)\n return allprops", "def summarize_metadata(self):\n meta_dict = {}\n for comp in self.dataset.data_vars:\n for mkey, mvalue in self.dataset[comp].attrs.items():\n 
meta_dict[f\"{comp}.{mkey}\"] = mvalue\n\n return meta_dict", "def _merge_attributes(self, workout):\n keys = self.__table__.columns.keys()\n for key in keys:\n if key in [\"id\",\n \"external_id\",\n \"is_duplicate_with\",\n \"manual_check_required_with\",\n ]:\n continue\n elif getattr(self, key) == None:\n # copy attribute if empty; else keep existing \n setattr(self, key, getattr(workout, key))", "def insertable_dict(self):\n # .strip('_') is for type_\n return {\n 'f_' +\n p.key.strip('_'): getattr(\n self,\n p.key) for p in self.__mapper__.attrs}", "def categorize_attributes():\n global attr_categories, seeds\n print \"Generating seeds...\"\n seeds = get_seeds()\n\n print \"Categorizing attributes...\"\n categorized = categorize(seeds)\n \n category_distances = {}\n attr_categories = {}\n for c in categorized:\n for (attr, score) in categorized[c]:\n attr_categories[attr] = c\n category_distances[attr] = score", "def apply(self, attributes):\n return {\n self.attribute_names[k]: v\n for k, v in attributes.items()\n }", "def _get_area_incmfd_attr(max_np, max_hd, max_bins):\n\n att = []\n att.append({'name': 'src_id', 'type': 'String', 'len': 10})\n att.append({'name': 'src_name', 'type': 'String', 'len': 30})\n att.append({'name': 'tect_reg', 'type': 'String', 'len': 30})\n att.append({'name': 'upp_seismo', 'type': 'Real'})\n att.append({'name': 'low_seismo', 'type': 'Real'})\n att.append({'name': 'mag_scal_r', 'type': 'String', 'len': 15})\n att.append({'name': 'rup_asp_ra', 'type': 'Real'})\n att.append({'name': 'mfd_type', 'type': 'String', 'len': 20})\n\n att.append({'name': 'min_mag', 'type': 'Real'})\n att.append({'name': 'bin_width', 'type': 'Real'})\n att.append({'name': 'num_bins', 'type': 'Integer'})\n for i in range(1, max_bins+1):\n lab = 'or_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n att.append({'name': 'num_npd', 'type': 'Integer'})\n for i in range(1, max_np+1):\n lab = 'weight_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'strike_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'rake_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'dip_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n att.append({'name': 'num_hdd', 'type': 'Integer'})\n for i in range(1, max_hd+1):\n lab = 'hdd_d_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n lab = 'hdd_w_%d' % (i)\n att.append({'name': lab, 'type': 'Real'})\n\n return att", "def _get_categorized_columns(tableColumns):\n columns = {}\n columns_ref = {}\n columns_pri = {}\n columns_ignore = {}\n first_pk_col = None\n\n for col_name, col_attrs in tableColumns.iteritems():\n if RuleHandler.STR_SKIP in col_attrs:\n columns_ignore[col_name] = col_attrs\n elif col_attrs['isPk']:\n if first_pk_col is None:\n first_pk_col = col_name\n columns_pri[first_pk_col] = col_attrs\n else:\n col_attrs['isPk'] = False\n columns[col_name] = col_attrs\n\n if first_pk_col in columns_pri:\n columns_pri[first_pk_col]['isPk'] = False\n columns[first_pk_col] = columns_pri[first_pk_col]\n del columns_pri[first_pk_col]\n\n if 'pkC' not in columns_pri:\n columns_pri['pkC'] = copy.copy(col_attrs)\n columns_pri['pkC']['isPkC'] = list([first_pk_col])\n\n columns_pri['pkC']['isPkC'].append(col_name)\n elif col_attrs['reference']:\n columns_ref[col_name] = col_attrs\n else:\n columns[col_name] = col_attrs\n\n return columns_pri, columns_ref, columns, columns_ignore", "def to_obj(self, entity):\n return {k: conv(getattr(entity, k, None)) for k, conv in self.cols_to_obj.items()}", "def 
table_to_dict(self, tab):\n dict = {}\n for colname in tab.colnames:\n dict[colname] = tab[colname].data\n return dict", "def get_patients_dict(table):\n\tf = open(table)\n\tpatients = f.readline().strip().split(\"\\t\")[1:]\n\t\t \n\tpatients_dict = {}\n\tfor i in patients:\n\t\tpatients_dict[i.replace('\"', '')] = {}\n\t\t \n\tfor i in f:\n\t\tl = i.strip().split(\"\\t\")\n\t\tgene = l[0]\n\n\t\tfor j in range(len(l[1:])):\n\t\t\tpatients_dict[patients[j]][gene] = int(l[1:][j])\n\treturn patients_dict", "def format_bgc_metadata(df,float_id):\n mdf = df[bgc_metadata_columns]\n bgc_metadata_dict = {}\n for col in list(mdf):\n bgc_metadata_dict[col] = list(pd.Series(mdf[col].unique()).astype(str).str.strip())\n bgc_metadata_dict[col] = list(pd.Series(mdf[col].unique()).astype(str).str.strip().replace(\"'\",'\"'))\n bgc_metadata_dict = json.dumps(bgc_metadata_dict) \n bgc_metadata_df = pd.DataFrame({\"float_id\": [float_id], \"Metadata_Dict\": [bgc_metadata_dict]})\n return bgc_metadata_df", "def __get_smart_attr_headers_params(self, href_parsed):\n\n\t\tsmart_attr_to_drive_list_map = {}\n\n\t\th4_elements = href_parsed.find_all('h4')\n\t\tsmart_attr_headers = filter(lambda x: self.SMART_ATTR_HEADER_REGEX.match(x.text), h4_elements)\n\n\t\tfor smart_attr_header in smart_attr_headers:\n\t\t\tdrives = smart_attr_header.find_next(string=self.SMART_ATTR_DRIVE_LIST_REGEX).parent.parent\n\t\t\tdrives = drives.text.split(':')[1]\n\t\t\tsmart_attr_to_drive_list_map[smart_attr_header.text] = self.__clean_drive_text(drives)\n\n\t\treturn smart_attr_to_drive_list_map", "def get_attributes(self) -> Dict[str, str]:\n pass", "def get_attribute_data(self, attrs):\n return {\n 'id': attrs['data-id'],\n }", "def test_fields_to_dict(self):\r\n test_data = \\\r\n \"\"\"0\tR27DLI_4812\tR27DLI_600\tR27DLI_727\tU1PLI_403\tU1PLI_8969\tU1PLI_9080\tU1PLI_9526\tW3Cecum_6642\tW3Cecum_8992\r\n1\tU1PLI_7889\r\n2\tW3Cecum_4858\r\n3\tR27DLI_3243\tR27DLI_4562\tR27DLI_6828\tR27DLI_9097\tU1PLI_2780\tU1PLI_67\tU9PSI_10475\tU9PSI_4341\tW3Cecum_5191\"\"\".splitlines() # output from cd-hit\r\n obs = fields_to_dict(test_data)\r\n exp = {\r\n '0': ['R27DLI_4812', 'R27DLI_600', 'R27DLI_727', 'U1PLI_403',\r\n 'U1PLI_8969', 'U1PLI_9080', 'U1PLI_9526', 'W3Cecum_6642', 'W3Cecum_8992'],\r\n '1': ['U1PLI_7889'],\r\n '2': ['W3Cecum_4858'],\r\n '3': ['R27DLI_3243', 'R27DLI_4562', 'R27DLI_6828', 'R27DLI_9097', 'U1PLI_2780', 'U1PLI_67', 'U9PSI_10475', 'U9PSI_4341', 'W3Cecum_5191']}\r\n self.assertEqual(obs, exp)", "def to_dict(self):\n dictionary = {}\n for column in self.__table__.columns:\n attribute_field_name = self.__mapper__.get_property_by_column(column).key\n attribute = getattr(self, attribute_field_name)\n dictionary[attribute_field_name] = self._make_serializable(attribute)\n\n return dictionary", "def fromTable(cls, table):\n cls.__attrmap__ = {}\n cls.__colmap__ = {}\n allColumns = list(table)\n for column in allColumns:\n attrname = cls.namingConvention(column.model.name)\n cls.__attrmap__[attrname] = column\n cls.__colmap__[column] = attrname", "def buildAdsTable_v1(output_file = None):\r\n ads_table = []\r\n text_props = ['readability_text', '_all']\r\n onto_props_with_mapping = {'phone':['telephone.name', 'telephone.name.raw'], 'email': ['email.name', 'email.name.raw'],\r\n 'posting_date':['inferlink_date', 'readability_date', 'high_recall_readability_date'],\r\n 'price':['price'], 'location':['addressLocality'],\r\n 'name':['name'],\r\n 'ethnicity':['ethnicity'],\r\n 'eye_color':['eyeColor'], 'title':['title'],\r\n 
'hair_color':['hairColor'], 'nationality':['nationality'],\r\n 'business_type':['business_type'],\r\n 'business_name':['streetAddress'], 'services':['serviceType'],\r\n 'business': ['streetAddress'],\r\n 'physical_address': ['streetAddress'],\r\n 'gender':['gender'], 'top_level_domain':['top_level_domain'],\r\n 'obfuscation':['telephone.isObfuscated', 'email.isObfuscated'],\r\n 'age':['age'], 'hyperlink:':['relatedLink'], 'drug_use':['drug_use'],\r\n 'review_site':['review_site'], 'review_id':['review_id'],\r\n 'number_of_individuals':['name_count'],\r\n 'ad': ['identifier'],\r\n 'multiple_phone': ['telephone_count'],\r\n 'cluster': ['seller.uri'],\r\n 'seed': ['seller.telephone.name', 'seller.email.name']\r\n }\r\n non_readability_props = ['number_of_individuals', 'ad', 'multiple_phone', 'cluster', 'phone', 'posting_date', 'email']\r\n onto_props_without_mapping = ['image_with_email', 'image_with_phone']\r\n for property, value_list in onto_props_with_mapping.iteritems():\r\n dict = {}\r\n dict['onto_prop'] = property\r\n mappings = []\r\n tmp = {}\r\n for v in value_list:\r\n if property == 'phone' or v == 'seller.telephone.name':\r\n tmp[v] = 'build_phone_match_clause'\r\n tmp['_all'] = 'build_phone_match_clause'\r\n tmp['url'] = 'build_phone_regexp_clause'\r\n elif v == 'email.name':\r\n tmp[v] = 'build_email_match_clause'\r\n tmp['_all'] = 'build_match_phrase_clause'\r\n elif property == 'ad':\r\n tmp[v] = 'build_term_clause'\r\n elif '_count' in v:\r\n tmp[v] = 'build_count_match_clause'\r\n elif property == 'gender':\r\n tmp[v] = 'build_gender_match_clause'\r\n elif property == 'posting_date':\r\n tmp[v] = 'build_match_phrase_clause'\r\n else:\r\n tmp[v] = 'build_match_clause'\r\n if property not in non_readability_props:\r\n for v in text_props: # will overwrite for seller.telephone.name\r\n tmp[v] = 'build_match_clause_inner'\r\n mappings.append(tmp)\r\n dict['mappings'] = mappings\r\n ads_table.append(dict)\r\n\r\n for property in onto_props_without_mapping:\r\n dict = {}\r\n dict['onto_prop'] = property\r\n mappings = []\r\n tmp = {}\r\n for v in text_props:\r\n tmp[v] = 'build_match_clause_inner'\r\n mappings.append(tmp)\r\n dict['mappings'] = mappings\r\n ads_table.append(dict)\r\n\r\n if output_file:\r\n file = codecs.open(output_file, 'w', 'utf-8')\r\n for entry in ads_table:\r\n json.dump(entry, file)\r\n file.write('\\n')\r\n file.close()", "def convert_attr_to_dict(attr):\n\n\tresult = dict()\n\tattr = attr.split(';')\n\tattrlist = [a.split(':') for a in attr]\n\tfor pair in attrlist:\n\t\tif len(pair) == 2:\n\t\t\tkey = pair[0]\n\t\t\tvalue = pair[1]\n\t\t\tresult[key] = value\n\n\treturn result", "def preprocess_attributes(business):\n\tattrs = business[ATTRIBUTES]\n\tattrs_dict = dict()\n\n\tfor (key, val) in attrs.items():\n\t\tval = ast.literal_eval(val)\n\t\tif val is None:\n\t\t\tcontinue\n\n\t\t# if val is a json object\n\t\tif isinstance(val, dict):\n\t\t\tinner_dict = dict()\n\t\t\tfor (inner_key, inner_val) in val.items():\n\t\t\t\tinner_key = inner_key\n\t\t\t\tinner_val = inner_val\n\t\t\t\tinner_dict[inner_key] = inner_val\n\t\t\tattrs_dict[key] = inner_dict\n\t\telse:\n\t\t\tattrs_dict[key] = val\n\tbusiness[ATTRIBUTES] = attrs_dict", "def load_attribute_data():\n global attr_value_counts, attr_counts, value_counts, \\\n attr_value_ratios, attrs\n\n print \"Loading extraction data...\"\n with open('./data/common_extractions.json') as f:\n place_data = json.loads(f.read())\n for place in place_data:\n for attr in place_data[place]:\n if attr not in 
attr_value_counts:\n attrs.add(attr)\n attr_value_counts[attr] = {}\n attr_counts[attr] = 0\n for value in place_data[place][attr]:\n c = place_data[place][attr][value]\n value_counts[value] = value_counts.get(value, 0) + c\n attr_counts[attr] += c\n attr_value_counts[attr][value] = \\\n attr_value_counts[attr].get(value, 0) + c\n \n for attr in attrs:\n attr_value_ratios[attr] = {}\n for value in attr_value_counts[attr]:\n attr_value_ratios[attr][value] = float(attr_value_counts[attr][value]) \\\n / attr_counts[attr]", "def get_tabledata_for(doc):\n plomino_table = doc.getForm().id\n data = dict(ADDITIONAL_VALUES)\n map = FIELDS_MAP[plomino_table]\n for key in doc.getItems():\n if key not in map:\n continue\n value = normalize_value(doc.getItem(key, None))\n data[map[key]] = value\n post_method = plomino_table + '_postprocessing'\n if post_method in globals():\n globals()[post_method](doc, data)\n iride_table = map['__tablename']\n return {\n iride_table: (data,)\n }", "def parse_product_full_attributes(advertise: Dict[str, Any]) -> Optional[Dict[str, Any]]:\n if \"product_full_attributes\" in advertise.keys():\n\n tmp_dict: Dict[str, Any] = dict()\n tmp: List[str] = re.split('[ \\t]{3,}', advertise[\"product_full_attributes\"])\n\n for term in tmp:\n key_val: List[str] = re.split('[ \\t]{2,}', term)\n if '' in key_val:\n key_val.remove('')\n if len(key_val) == 2:\n tmp_dict[re.sub(r'[^\\w\\s]', '', key_val[0]).strip()] = key_val[1]\n\n return tmp_dict", "def to_dict(self):\n result = {}\n for column_family_id, columns in six.iteritems(self._cells):\n for column_qual, cells in six.iteritems(columns):\n key = (_to_bytes(column_family_id) + b':' +\n _to_bytes(column_qual))\n result[key] = cells\n return result", "def infodump(self):\n idtable = dict.fromkeys(KEYS_IDSTABLE)\n rttable = dict.fromkeys(KEYS_RTTABLE)\n #Construct Common table, IMDb table, Review table\n commontable,imdbtable,reviewtable = self.dump_imdb()\n #Contruct IDTable\n idtable[\"imdb_id\"] = imdbtable[\"imdb_id\"]\n if commontable[\"kind\"] == \"movie\":\n #Contruct RT table\n rttable = self.dump_rt()\n idtable[\"rt_id\"] = rttable[\"rt_id\"]\n return idtable,commontable,imdbtable,rttable,reviewtable", "def _load_attr_labels(self, results):\n results[\"attr_labels\"] = results[\"ann_info\"][\"attr_labels\"]\n return results", "def parse_attribute(self,attr,table):\n \n attr_dict = {}\n \n for child in attr:\n name = child.attrib['name']\n \n #attributes can either have string or bool as the value we need\n #checking for boolean\n if 'val' in child[0].attrib:\n val = child[0].attrib['val']\n \n if val == 'true':\n flag = True\n else:\n flag = False\n \n attr_dict[name] = flag\n \n #else it's string stroed as text\n else:\n attr_dict[name] = super(DatabaseUmlParser,self).stripHashtags(child[0].text)\n \n attr = db_attribute.DbAttribute(table,attr_dict)\n \n return attr", "def _predefined_mapping_tables(dset):\n # for now this is mechanism independent.\n to_airnow = {\n \"OZONE\": \"o3\",\n \"PM2.5\": \"PM2_5_DRY\",\n \"PM10\": \"PM10\",\n \"CO\": \"co\",\n \"SO2\": \"so2\",\n \"NO\": \"no\",\n \"NO2\": \"no2\",\n }\n dset = dset.assign_attrs({\"mapping_tables_to_airnow\": to_airnow})\n return dset" ]
[ "0.65014577", "0.60592574", "0.60217834", "0.6018756", "0.6007916", "0.5968255", "0.58305824", "0.58305734", "0.57943606", "0.575547", "0.57497114", "0.574563", "0.5674587", "0.56742924", "0.56551945", "0.565404", "0.56439865", "0.5624873", "0.561691", "0.55270827", "0.5517848", "0.5494648", "0.54904675", "0.5484256", "0.5468459", "0.545606", "0.545606", "0.545606", "0.545606", "0.545606", "0.5427125", "0.54209733", "0.5405007", "0.538156", "0.5380996", "0.53295773", "0.5327861", "0.5327361", "0.5313287", "0.52995074", "0.52986133", "0.52976704", "0.5294389", "0.52700096", "0.5219843", "0.5218953", "0.5217133", "0.5216252", "0.5215792", "0.52146304", "0.52134025", "0.52097046", "0.52070177", "0.52045417", "0.5200091", "0.5181839", "0.5174498", "0.517283", "0.51695555", "0.5165833", "0.51553595", "0.5153838", "0.5142263", "0.5140614", "0.51383865", "0.51303357", "0.5127479", "0.5127117", "0.5126583", "0.5123767", "0.5117198", "0.5116267", "0.5115531", "0.5100252", "0.50985456", "0.5097563", "0.50846046", "0.50838685", "0.5074941", "0.5069607", "0.5068318", "0.50680345", "0.50671005", "0.50646365", "0.50607026", "0.5055063", "0.504997", "0.5047832", "0.5036148", "0.5033958", "0.5020809", "0.50199145", "0.5016523", "0.5016275", "0.5001607", "0.50001097", "0.49996427", "0.49974546", "0.49925262", "0.4982853" ]
0.8673553
0
reformat count table into a flattened table of sample_names/values
def reformat_countTable( self,analysis_id_I=None,sna2experimentID_I=None, sna2sns_I=None): if self.countTable: countTable = self.countTable[:]; else: countTable = []; countTable_flat = self.reformat_countOrFPKMTable( countOrFPKMTable_I=countTable, analysis_id_I=analysis_id_I, sna2experimentID_I=sna2experimentID_I, sna2sns_I=sna2sns_I, count_or_FPKM = 'count'); return countTable_flat;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def freq_table(a):\n Detail_freq = a.loc[:, (a.dtypes == object) | (a.dtypes == long) ].columns.get_values().tolist()\n print(Detail_freq)\n for freq in Detail_freq:\n df1 = pd.DataFrame(a[freq].value_counts(dropna=False).astype(float).map('{:20,.0f}'.format).sort_index()).rename(columns={freq:'Count'})\n df2 = pd.DataFrame(a[freq].value_counts(normalize = True, dropna=False).map('{:,.2%}'.format).sort_index()).rename(columns={freq:'Percentage'})\n df = pd.concat([df1, df2], axis = 1)\n print(df)", "def write_count_table(filtered_counts, gene_names, ids_liver_header):\n with open(\"filtered_tcga_counts.tsv\", \"w\") as file:\n file.write(\"gene_id\\tgene_name\\t\" + \"\\t\".join(ids_liver_header) + \"\\n\")\n for gene_name, counts_line in zip(gene_names, filtered_counts):\n file.write(gene_name + \"\\t\" + \"\\t\" + \"\\t\".join(counts_line) + \"\\n\")", "def __profile_to_table(data):\n output = [[\"condition\", \"mean\", \"min\", \"max\"]]\n order = data[\"order\"]\n\n for o in order:\n try:\n values = data[\"data\"][o]\n output.append(\n [o, str(mean(values)), str(min(values)), str(max(values))]\n )\n except Exception as e:\n print(e)\n\n return \"\\n\".join([\"\\t\".join(l) for l in output])", "def reformat_countOrFPKMTable(\n self,\n countOrFPKMTable_I=None,\n analysis_id_I=None,\n sna2experimentID_I=None,\n sna2sns_I=None,\n count_or_FPKM = 'count'):\n #format into a dictionary of rows for quick aligning with the tracking_id\n countOrFPKMTable_flat = [];\n for row in countOrFPKMTable_I:\n for k,v in row.items():\n if k=='tracking_id':continue;\n tmp = {};\n tmp['analysis_id'] = analysis_id_I;\n tmp['tracking_id'] = row['tracking_id'];\n\n sample_name_lst = k.split('_');\n sample_name_base = '_'.join(sample_name_lst[:-1]);\n sample_name_rep = eval(sample_name_lst[-1]);\n if sna2experimentID_I: \n experiment_id = sna2experimentID_I[sample_name_base];\n else:\n experiment_id=None;\n tmp['experiment_id'] = experiment_id;\n if sna2sns_I: \n sample_name = sna2sns_I[sample_name_base][sample_name_rep];\n else:\n sample_name=k;\n tmp['sample_name'] = sample_name;\n\n tmp['value'] = v;\n tmp['value_units'] = count_or_FPKM;\n tmp['used_'] = True;\n tmp['comment_'] = None;\n countOrFPKMTable_flat.append(tmp);\n return countOrFPKMTable_flat;", "def to_table(self):\n table = Table()\n table['THETA_LO'] = Quantity([self.counts.offset[:-1]], unit=self.counts.offset.unit)\n table['THETA_HI'] = Quantity([self.counts.offset[1:]], unit=self.counts.offset.unit)\n table['ENERG_LO'] = Quantity([self.counts.energy[:-1]], unit=self.counts.energy.unit)\n table['ENERG_HI'] = Quantity([self.counts.energy[1:]], unit=self.counts.energy.unit)\n table['counts'] = self.counts.to_table()['data']\n if self.counts.data_err is not None:\n table['counts_err'] = self.counts.to_table()['data_err']\n table['livetime'] = self.livetime.to_table()['data']\n table['bkg'] = self.bg_rate.to_table()['data']\n if self.bg_rate.data_err is not None:\n table['bkg_err'] = self.bg_rate.to_table()['data_err']\n table.meta['HDUNAME'] = \"bkg_2d\"\n return table", "def format_unifrac_sample_mapping(sample_ids, otu_ids, otu_table_array):\r\n out = []\r\n for i, row in enumerate(otu_table_array):\r\n for j, val in enumerate(row):\r\n if val > 0:\r\n line = [otu_ids[i], sample_ids[j], str(val)]\r\n out.append('\\t'.join(line))\r\n return out", "def table(x):\n c = Counter(x)\n return list(c), list(c.values())", "def accumulate_entries_as_tables(entries):\n name_table = {}\n num_table = {}\n for number, name in entries:\n name_table[name] = 
number\n num_table[number] = name\n\n return name_table, num_table", "def print_table(source, count=False):\n table_value = []\n table_header = []\n for source_key, source_value in source.items():\n for item in source_value:\n table_value.append([v for v in item.values()])\n table_header.append([k for k in item.keys()])\n if not count:\n print(tabulate(table_value,\n headers=table_header[0],\n tablefmt='orgtbl'))\n else:\n print(tabulate([[len(source_value)]],\n headers=[source_key],\n tablefmt='orgtbl'))", "def write_count_matrix(pb_count, outfile, first=1):\n # write the header (PB names)\n print(\" \" + \"\".join([\"%6s\" % name for name in NAMES]), file=outfile)\n # write the data table\n for residue_idx, residue_pb in enumerate(pb_count):\n print(\"%-5d\" % (residue_idx + first) +\n \" \".join(\"%5d\" % i for i in residue_pb), file=outfile)", "def summarize_sample_props(psd_list, sample_list):\n prop_list = [psd.sample_props for psd in psd_list]\n cols = ['amplicon median', 'mean size', 'lower size', 'upper size']\n\n return pd.DataFrame(prop_list, columns=cols, index=sample_list)", "def get_split_summary_table(all_df, train_df, test_df):\n table = PrettyTable()\n table.field_names = ['set', 'N total', 'N non-ICH', 'N ICH', 'frac non-ICH', 'frac ICH']\n for df, name in zip([all_df, train_df, test_df],['All', 'Train', 'Test']):\n table.add_row([name, len(df), len(df[df.Hemorrhage == 0]), len(df[df.Hemorrhage == 1]),\n f'{len(df[df.Hemorrhage == 0])/len(df):.3%}', f'{len(df[df.Hemorrhage == 1])/len(df):.3%}'])\n return table", "def generate_table(self, outtablename,\n cols=['A', 'B', 'AB'],\n generateTable=True):\n if generateTable:\n new_indices = ['time (s)', 'mean counts']\n for idx in self.data[cols].describe().index[2:]:\n new_indices.append(idx)\n outTable = self.data[cols].describe()\\\n .set_index(pd.Index(new_indices))\n outTable.to_latex(\n self.tables_dir + outtablename + \".tex\", float_format=\"%d\")\n print(\"Outtable: \", outTable)", "def construct_table(self):\n table_str = self.header_row\n row_lbls, col_lbls = self.get_idxvals()\n for r,rlbl in enumerate(row_lbls):\n row_data = [self.data[rlbl,clbl] for clbl in col_lbls]\n table_str += self.construct_row(r, row_data)\n \n return table_str", "def produce_mirna_single_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.single.bam.mirbase_counts.txt\")", "def summary():\n\n summary_result = session.query(Summary.Count, Summary.Total).all()\n session.close()\n\n # Return a List of Column Names (Sample Names)\n return jsonify(summary_result)", "def usage_table_format(result):\n table = []\n for item in result:\n row = OrderedDict()\n row['Value'] = item['name']['localizedValue']\n row['Usage'] = item['currentValue'] or \"0\"\n row['Limit'] = item['limit'] or \"0\"\n table.append(row)\n return table", "def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df", "def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df", "def num_54():\n frmt = \"\"\"\n :{}\n :Generate Data that conform to a uniform distribution.\n :\n :Class values: {}\n :Population size: {}\n :Results:\n : values:\n {}\n : table:\n {}\n : histogram: (class, frequency)\n {}\n :Then use NumPyArrayToTable to get your table.\n \"\"\"\n # import numpy as np\n st = 1\n end = 7\n vals = 
np.arange(st,end)\n reps = 10\n z = np.repeat(vals,reps)\n np.random.shuffle(z)\n ID = np.arange(len(z))\n tbl = np.array(list(zip(ID, z)), \n dtype = [('ID', 'int'), ('Class', 'int')])\n h = np.histogram(z, np.arange(st, end+1))\n h = np.array(list(zip(h[1], h[0])))\n pad = \" \"\n args =[num_54.__doc__, vals, reps*len(vals),\n indent(str(z.reshape(3,20)), pad),\n indent(str(tbl), pad), indent(str(h), pad)]\n print(dedent(frmt).format(*args))", "def samples(app, args):\n engine = create_engine(args.datafile)\n meta = MetaData()\n meta.reflect(engine)\n print(\"\\t\".join([str(x).replace('counts.', '')\n for x in meta.tables['counts'].columns\n if not x == 'counts.index']))", "def produce_mirna_unique_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.uniq.bam.mirbase_counts.txt\")", "def to_orange_table(samples):\n # Create table and fill it with sample data:\n table = []\n for sample in samples:\n table.append(_parse_sample_descriptor(sample.descriptor['sample']))\n\n # Create domain (header in table):\n header = [var[1]['type'].make(var[0]) for var in DATA]\n\n # It is necessary to provide all possible values for dicrete variable with\n # Iterate through all discrete variables in header:\n for head_, i in [(var, i) for i, (var, dat) in enumerate(zip(header, DATA)) if dat[1]['type'] == DiscreteVariable]:\n # Provide all possible values for discrete_var:\n head_.values = list(set([sample[i] for sample in table]))\n\n metas = [var[1]['type'].make(var[0]) for var in METAS]\n return Table(Domain(header, metas=metas), table)", "def raw_counts(self):\n return np.array([[1, 2], [3, 4], [5, 6]])", "def append_counting(dict):\n row_c = []\n # for nuc in NUC: #Scans all the elements and adds it to the table.\n # row_c.append(dict[nuc])\n for mot in MOT:\n row_c.append(dict[mot])\n for nuc_nr in NUC_NR :\n row_c.append(dict[nuc_nr + \"_NR\"])\n # #row.extend([dict[\"AA_NR\"], dict[\"TT_NR\"], dict[\"CC_NR\"], dict[\"GG_NR\"]])\n return row_c", "def _get_table_from_samples(self, index):\n df = pd.DataFrame()\n for sample in self.samples:\n sd = sample.to_dict()\n ser = pd.Series(\n {k: v for (k, v) in list(sd.items()) if not k.startswith(\"_\")}\n )\n df = df.append(ser, ignore_index=True)\n index = [index] if isinstance(index, str) else index\n if not all([i in df.columns for i in index]):\n _LOGGER.debug(\n \"Could not set {} index. 
At least one of the \"\n \"requested columns does not exist: {}\".\n format(CFG_SAMPLE_TABLE_KEY, index))\n return df\n _LOGGER.debug(\"Setting sample_table index to: {}\".format(index))\n df.set_index(keys=index, drop=False, inplace=True)\n return df", "def _create_counts(out_dts, out_dir):\n ma, ma_mirna = _merge(out_dts)\n out_ma = op.join(out_dir, \"counts.tsv\")\n out_ma_mirna = op.join(out_dir, \"counts_mirna.tsv\")\n ma.to_csv(out_ma, sep=\"\\t\")\n ma_mirna.to_csv(out_ma_mirna, sep=\"\\t\")\n return out_ma_mirna, out_ma", "def count_frequency(df, count_columns: list, group_columns=['Fabric_name', 'Fabric_label'], margin_column_row:tuple=None):\n\n if margin_column_row and len(margin_column_row) == 2:\n if all([isinstance(element, bool) for element in margin_column_row]):\n # margin_column_row = ((False, False),) * len(count_columns)\n margin_column_row = (margin_column_row, ) * len(count_columns)\n\n # by default keep summary row but remove summary column\n if not margin_column_row:\n margin_column_row = ((False, True),) * len(count_columns)\n if len(count_columns) != len(margin_column_row):\n print('\\n')\n print('Parameters count_columns and margin_column_row in count_frequency function have different length')\n exit()\n\n index_lst = [df[column] for column in group_columns if column in df.columns and df[column].notna().any()]\n frequency_df = pd.DataFrame()\n\n for column, (margin_column, margin_row) in zip(count_columns, margin_column_row):\n if column in df.columns and df[column].notna().any():\n df[column].fillna(np.nan, inplace=True)\n current_df = pd.crosstab(index=index_lst, columns=df[column], margins=any((margin_column, margin_row)))\n current_df = current_df.sort_index()\n if any((margin_column, margin_row)):\n # drop column All\n if not margin_column:\n current_df.drop(columns=['All'], inplace=True)\n # drop row All\n if not margin_row:\n current_df.drop(index=['All'], inplace=True)\n if frequency_df.empty:\n frequency_df = current_df.copy()\n else:\n frequency_df = frequency_df.merge(current_df, how='outer', on=group_columns)\n\n frequency_df.fillna(0, inplace=True) \n frequency_df.reset_index(inplace=True) \n return frequency_df", "def count_frequency(df, count_columns: list, group_columns=['Fabric_name', 'Fabric_label'], margin_column_row:tuple=None):\n\n if margin_column_row and len(margin_column_row) == 2:\n if all([isinstance(element, bool) for element in margin_column_row]):\n # margin_column_row = ((False, False),) * len(count_columns)\n margin_column_row = (margin_column_row, ) * len(count_columns)\n\n # by default keep summary row but remove summary column\n if not margin_column_row:\n margin_column_row = ((False, True),) * len(count_columns)\n if len(count_columns) != len(margin_column_row):\n print('\\n')\n print('Parameters count_columns and margin_column_row in count_frequency function have different length')\n exit()\n\n index_lst = [df[column] for column in group_columns if column in df.columns and df[column].notna().any()]\n frequency_df = pd.DataFrame()\n\n for column, (margin_column, margin_row) in zip(count_columns, margin_column_row):\n if column in df.columns and df[column].notna().any():\n df[column].fillna(np.nan, inplace=True)\n current_df = pd.crosstab(index=index_lst, columns=df[column], margins=any((margin_column, margin_row)))\n current_df = current_df.sort_index()\n if any((margin_column, margin_row)):\n # drop column All\n if not margin_column:\n current_df.drop(columns=['All'], inplace=True)\n # drop row All\n if not margin_row:\n 
current_df.drop(index=['All'], inplace=True)\n if frequency_df.empty:\n frequency_df = current_df.copy()\n else:\n frequency_df = frequency_df.merge(current_df, how='outer', on=group_columns)\n\n frequency_df.fillna(0, inplace=True) \n frequency_df.reset_index(inplace=True) \n return frequency_df", "def encoding_labelcount(df, target=None):\n if not target:\n target = ['user_id', 'title']\n\n norm = round(\n df.shape[0] / 10000) # normalize the count by /per 100000 entries\n for col in target:\n df[col + '_labelcount'] = df[col].map(df[col].value_counts()) / norm\n df.drop([col], axis=1, inplace=True)\n return None", "def build_metrics_counter_data(count_metrics):\n return [{'name': name, 'delta': delta} for name, delta in iteritems(count_metrics)]", "def make_stats(mapping):\r\n stats = [\"Clustersize\\t#\"]\r\n counts = defaultdict(int)\r\n for key in mapping.keys():\r\n counts[len(mapping[key])] += 1\r\n\r\n keys = sorted(counts.keys())\r\n for key in keys:\r\n stats.append(\"%d:\\t\\t%d\" % (key + 1, counts[key]))\r\n return \"\\n\".join(stats)", "def count_variants(vcf_list, sample_list):\n\n df_lst = []\n\n sample_vcf_dct = dict(zip(sample_list,vcf_list))\n\n for s in sample_vcf_dct.keys():\n\n vcf_in = sample_vcf_dct[s]\n vcf = VariantFile(vcf_in)\n\n snv = 0\n indel = 0\n\n for rec in vcf:\n\n ref_len = len(rec.ref)\n\n for a in rec.alts:\n if len(a) > 1 or ref_len > 1:\n indel +=1\n else:\n snv +=1\n\n df_lst.append([s,snv,indel])\n\n out_df = pd.DataFrame(df_lst, columns=['sample','snvs','indels'])\n\n return out_df", "def counts_table(data, attr):\n pd.options.mode.chained_assignment = None # default='warn'\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n # expanding a table to have all variable options in a column with their \n # parent attribute\n allvariables = attr.apply(lambda x: pd.Series(x['vars']),axis=1).stack().reset_index(level=1, drop=True)\n allvariables.name='var'\n allvariables = attr.drop('vars', axis=1).join(allvariables)\n av = allvariables.drop(attr.index[-1])\n # populate the table with counts\n for c in classlist:\n clist = []\n count = 0\n for i, row in av.iterrows():\n att = row['attr']\n var = row['var']\n sub = data[[att,'class']]\n sub = sub[sub[att]==var]\n if not sub.empty:\n ssub = sub[sub['class']==c]\n if not ssub.empty:\n count = len(ssub)\n else:\n count = 0\n clist.append(count)\n av[c] = clist\n\n return av", "def print_all_counts_as_shasta_matrix(all_counts, max_count=50, pseudocount=1):\n a_t_counts = all_counts[\"A\"] + all_counts[\"T\"]\n g_c_counts = all_counts[\"G\"] + all_counts[\"C\"]\n\n total = 0\n for i in range(max_count + 1):\n total += max(pseudocount, a_t_counts[i])\n\n line = list()\n for i in range(max_count + 1):\n count = max(pseudocount, a_t_counts[i])\n line.append(\"%.9f\" % math.log((count/total),10))\n\n print(\">AT prior\")\n print(\",\".join(line))\n print()\n\n total = 0\n for i in range(max_count + 1):\n total += max(pseudocount, g_c_counts[i])\n\n line = list()\n for i in range(max_count + 1):\n count = max(pseudocount, g_c_counts[i])\n line.append(\"%.9f\" % math.log((count/total),10))\n\n print(\">GC prior\")\n print(\",\".join(line))\n print()", "def format_histogram_one_count(counts, bin_edges):\r\n lines = []\r\n lines.append('Length\\tCount')\r\n for edge, count in zip(bin_edges, counts):\r\n lines.append('\\t'.join(map(str, [edge, count])))\r\n return '\\n'.join(lines)", "def list_to_table(data, col_count):\r\n\r\n if 
len(data) % col_count != 0:\r\n message = \"Cannot convert list to table. \" \\\r\n \"The total number of cells ({0}) is not compatible with the number of columns ({1})\"\\\r\n .format(len(data), col_count)\r\n raise ValueError(message)\r\n\r\n row_count = len(data) // col_count\r\n # cpp way\r\n tabled_data = []\r\n for row_i in range(row_count):\r\n row = []\r\n for col_i in range(col_count):\r\n row.append(data[row_i * col_count + col_i])\r\n tabled_data.append(row)\r\n return tabled_data", "def summary_scores(fold_scores_list): \n d = {}\n nulls = {}\n for idx, score in enumerate(fold_scores_list):\n d.update({f\"{idx+1}0 samples:\": score[0].T})\n nulls.update({f\"{idx+1}0 samples:\": score[1].T})\n \n summary_scores = pd.concat(d.values(), axis=1, keys=d.keys()).T\n summary_nulls = pd.concat(nulls.values(), axis=1, keys=nulls.keys()).T\n return summary_scores, summary_nulls", "def produce_mirna_allbest_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.bam.mirbase_counts.txt\")", "def table(self) -> pandas.DataFrame:\n if self._table is None:\n VALUE_LABEL = (self.value_label if self.value_label\n else CountColumns.index)\n\n if not any((self.show_counts, self.show_percentages)):\n raise ConfigurationError(\"Need to set at least one thing to show\")\n if self.show_counts:\n self._table = self.counts.reset_index()\n self._table.columns = [VALUE_LABEL, CountColumns.count]\n if self.show_percentages:\n percentages = self.percentages.round(self.decimal_places)\n if self._table is None:\n self._table = percentages.reset_index()\n self._table.columns = [VALUE_LABEL, CountColumns.percentage]\n else:\n self._table[CountColumns.percentage] = percentages.values\n return self._table", "def get_value_counts_pd(X, columns, cate_cap=30):\n count_dict = get_value_counts(X, columns=columns, cate_cap=cate_cap)\n idx_tuple = []\n value_counts = []\n for col in count_dict.keys():\n if type(count_dict[col]) == str:\n idx_tuple += [(col, col)]\n value_counts += ['Too many categories']\n else: \n temp = [[col]*len(count_dict[col]), count_dict[col].keys()]\n idx_tuple += list(zip(*temp))\n value_counts += count_dict[col].values()\n multiidx = pd.MultiIndex.from_tuples(idx_tuple, names=['column', 'category'])\n counts_df = pd.DataFrame(value_counts, columns=['counts'], index=multiidx)\n return counts_df", "def generate_table(results):\n keyslist = list(results[0].keys())\n table = PrettyTable(keyslist)\n for dct in results:\n table.add_row([dct.get(c, \"\") for c in keyslist])\n return table", "def test_toTable(self):\r\n # Empty results.\r\n out_f = StringIO()\r\n self.res1.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(),\r\n \"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\\n\")\r\n out_f.close()\r\n\r\n # Results with multiple samples.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\nS1\\t10\\t20\\t2.5\\t2.5\\t3.5\r\nS1\\t20\\t30\\t3.5\\t2.5\\t3.5\r\nS2\\t1\\t3\\t0.4\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res2.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Custom header.\r\n exp = \"\"\"foo\\tbar\\tbaz\\tbazaar\\tbazaaar\\tbazaaaar\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res1.addSample('S1', 42)\r\n self.res1.addSampleEstimate('S1', 5, 21, 1.5, 2.5, 3.5)\r\n self.res1.toTable(out_f,\r\n header=['foo', 'bar', 'baz', 'bazaar', 'bazaaar', 'bazaaaar'])\r\n self.assertEqual(out_f.getvalue(), 
exp)\r\n out_f.close()\r\n\r\n # Invalid header.\r\n with self.assertRaises(ValueError):\r\n out_f = StringIO()\r\n self.res1.toTable(out_f, header=['foo'])\r\n\r\n # Cells with None as their value.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t43\\tN/A\\tN/A\\tN/A\\tN/A\r\n\"\"\"\r\n out_f = StringIO()\r\n res = RichnessEstimatesResults()\r\n res.addSample('S1', 42)\r\n res.addSampleEstimate('S1', 43, None, None, None, None)\r\n res.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()", "def create_numbers_table():\n work_tuples = parse_columns()\n print('\\n\\n\\n ----- Tableau récapitulatif -----')\n print('-----------------------')\n for ii in work_tuples:\n line = '|'\n for ij in ii:\n line += ' ij |'\n print(line)\n print('-----------------------')", "def to_frame(self):\n # Create a set of dictionaries/lists for each column\n data = dict([(i_var.name, []) for i_var in self.inputs])\n data.update({self.OUTPUT_LABEL: [], self.INPUT_LABEL: [], self.name: []})\n\n # A very ugly loop to produce all the probabilities in a nice way.\n # Note that this just reproduces what is already in `self.lookup`.\n # Honestly, I just haven't thought of a better way to get nice output.\n for i_index, i_state in enumerate(self.input_states):\n for o_var, results in zip(self.outputs, self.per_state_results):\n for o_state, o_p in enumerate(results[i_index]):\n for i_var, s in zip(self.inputs, i_state):\n data[i_var.name].append(s)\n data[self.OUTPUT_LABEL].append(o_var.name)\n data[self.INPUT_LABEL].append(o_state)\n data[self.name].append(o_p)\n all_data = pd.DataFrame(data=data)\n\n # The magnificent pivot table function does all the work\n return pd.pivot_table(data=all_data, values=[self.name],\n index=[i_var.name for i_var in self.inputs],\n columns=[self.OUTPUT_LABEL, self.INPUT_LABEL])", "def summarize_chrom_classif_by_sample(psd_list, sample_list):\n cl_list = [psd.chrom_props.classif for psd in psd_list]\n cols = psd_list[0].chrom_props.index\n df_stat = pd.DataFrame(cl_list, columns=cols, index=sample_list)\n\n return df_stat", "def generate_table(self, rows):\n ...", "def aggregate_counts(counts_files,\n output_file = '/dev/stdout', \n sample_names=None, \n sep=\"\\t\", \n header=0, \n comment=\"#\"):\n sample_pos = -1\n \n if sample_names is not None:\n if len(sample_names)!=len(counts_files):\n logging.error(\"Number of sample names is not the same length as \",\n \"the number of counts files.\")\n raise RuntimeError(\"\")\n\n # read in all counts files\n counts_df = [pd.read_csv(file, sep=sep, header=header, comment=comment) \n for file in counts_files]\n\n # overwrite the sample names if provided\n if sample_names:\n for i, df in enumerate(counts_df):\n #counts_df[i].columns[sample_pos] = sample_names[i]\n new_columns = df.columns.tolist()\n new_columns[sample_pos] = sample_names[i]\n df.columns = new_columns\n else:\n # check sample names are all different\n sample_names_from_files = [df.columns[sample_pos] for df in counts_df]\n\n if (len(set(sample_names_from_files))<len(counts_files)):\n logging.error(\"Sample names in counts files are not unique. 
Fix \",\n \"or provide a list of sample names to use.\")\n raise RunTimeError()\n\n\n # merge the dataframes together\n merged_df = reduce(lambda x, y: pd.merge(x,y), counts_df)\n\n\n # output\n if header is not None:\n out_header = True\n\n with open(output_file, 'w') as handle:\n merged_df.to_csv(handle, sep=sep, header=out_header, index=False)\n\n return 0", "def _collect_counts(self, instance_list):\n \"\"\" Based on each instance, I augment empirical counts for every word and its BIO label in feature_count_table and for every transition from previous label to current label in transition_count_table.\n All \"rare words\" (those words that appear less than 3 times) are replaced by <UNK>.\n I also add label|START counts.\n \"\"\"\n # Build feature_count_table of V x labels and transition_count_table of labels x labels\n for instance in instance_list: # Set of <(w, pos), l>\n index = 0\n for t in instance.data: # Tuple of (w, pos)\n index = instance.data.index(t)\n # print t[0] # word\n # print instance.label[index] # label\n if t in self.V:\n self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])] +=1\n else:\n self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])] +=1\n if index > 0:\n self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] += 1\n else:\n self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])] += 1", "def _format_data(self):\n formatted_data = []\n\n for row in self._data_agg_by_mean_value.iterrows():\n \n car_make = row[0]\n mean_car_value = round(row[1][0], 2)\n formatted_data.append({'car_make': car_make, 'mean_car_value': mean_car_value})\n\n return formatted_data", "def tabulate(image_path):\n img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)\n unique, counts = np.unique(img, return_counts=True)\n dummy = [print(a[0], a[1]) for a in zip(unique, counts)]", "def tabulate(self):\n for test_name, test in self.test_types.items():\n for ivs_name, ivs in self.ivs.items():\n if self.verbose:\n print(\"{0}: {1}\".format(ivs_name, test_name))\n tree = test(ivs)\n if not tree:\n continue\n score = tree.score(True)\n if self.verbose > 1:\n tree.print_structure()\n\n self.result_matrix['ivs name'][ivs_name][test_name] = score\n self.result_matrix['test type'][test_name][ivs_name] = score", "def make_summary_tables( res ):\n\n # transform second table to csv and read this as a dataFrame\n result_fit_df = pd.read_csv(StringIO( res.tables[1].as_csv() ), sep=\",\",index_col=0)\n result_fit_df.columns = [i.strip() for i in result_fit_df.columns]\n result_fit_df.index = [i.strip() for i in result_fit_df.index]\n\n # first table is trickier because the data is spread on to columns, and there is title line\n L = res.tables[0].as_html().split('\\n')\n L.pop(1) # get rid of the title\n tmp = pd.read_html('\\n'.join(L) , header=None)[0] # read as a dataframe, but with 4 columns \n\n names = list(tmp[0]) + list(tmp[2])[:-2] # columns 0 and 2 are metric names\n values = list(tmp[1]) + list(tmp[3])[:-2] # columns 1 and 3 are the corresponding values\n # NB : I exclude the last 2 elements which are empty \n \n result_general_df = pd.DataFrame( {'Name': names , 'Value' : values}, index = names , columns=['Value'] )\n \n return result_general_df , result_fit_df", "def get_table_data(data):\n logger.info('Generating summary table')\n\n table_dict = data.T.to_dict()\n table = []\n for date in sorted(data.index):\n table.append(table_dict[date])\n 
table.sort(key=lambda x: x['DATE'], reverse=True)\n for ii in range(len(table)):\n for k, v in table[ii].items():\n if pd.isnull(v):\n table[ii][k] = None\n return table", "def from_multicsv(self,input_data):\n reformatted_data = []\n for (i,row) in enumerate(input_data):\n if i==0:\n headers = row\n else:\n data_row = {}\n for (j,h) in enumerate(headers):\n if j<len(row):\n data_row.update({h : row[j]})\n else:\n data_row.update({h : 0})\n reformatted_data.append(data_row)\n return reformatted_data", "def prepare_table(table):\n n = len(table)\n for i, row in enumerate(table):\n assert len(row) == n, f\"len(row) = {len(row)} != {n} = n\"\n for j, _ in enumerate(row):\n if i == j:\n table[i][i] = 0.0\n elif i > j:\n table[i][j] = 1 - table[j][i]\n return table", "def tabler(subcorpus_names, list_of_dicts, num_rows):\n import pandas as pd\n cols = []\n for subcorp, data in zip(subcorpus_names, list_of_dicts):\n col = pd.Series([w for w, v in data.most_common(num_rows)], name = subcorp)\n cols.append(col)\n word_table = pd.concat(cols, axis = 1)\n return word_table", "def create_table(categories:list)->str:\n\n total_spent = get_total_spent(categories)\n\n table = str()\n\n for row_num in range(11):\n row_label = 100 - row_num*10\n\n # Row label creation - ie 100| .. 90| ... 80| ...etc\n row = f\"{row_label:>3}|\"\n\n for category in categories:\n percentage = math.floor(category.total_spent/total_spent * 10) * 10\n if percentage >= row_label:\n row += ' o '\n else:\n row += ' '\n \n table += row + ' \\n'\n return table", "def create_table(values, col_labels, act, s, max_=False, att=0):\n sort_by = ['combinator', 'normaliz', 'drop'] if att != 0 else ['combinator', 'normaliz', 'init', 'drop']\n table = pd.DataFrame(values, columns=col_labels).sort_values(by=sort_by)\n cm_green = sns.light_palette(\"green\", as_cmap=True)\n cm_green_inv = sns.light_palette(\"green\", n_colors=10, reverse=True, as_cmap=True)\n subset_to_color = ['act', 'combinator', 'normaliz', 'init', 'drop']\n if max_ is False:\n subset_to_color = subset_to_color[1:]\n if att != 0:\n subset_to_color = ['combinator']\n # TODO: set back to col_labels[-10:] when epochs are 200\n styled_table = table.style.apply(apply_color, subset=subset_to_color, axis=0). \\\n apply(highlight_max, axis=None, subset=col_labels[-3:], mode=s). \\\n apply(highlight_max, axis=None, subset=['min acc'], mode=s). \\\n apply(highlight_max, axis=None, subset=['max acc'], mode=s). \\\n hide_index(). \\\n background_gradient(subset='d(a₀,aₙ)', cmap=cm_green). \\\n background_gradient(subset=['zeroed neurons'], cmap=cm_green). \\\n background_gradient(subset=['params'], cmap=cm_green_inv). \\\n set_properties(**{'width': '250px'}, **{'text-align': 'center'}). \\\n set_caption(act + ' [{}]'.format(s.upper())). 
\\\n set_table_styles([{'selector': 'caption', 'props': [('color', 'black'), ('font-size', '25px')]},\n {'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},\n {'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},\n {'selector': 'th', 'props': [('background', '#606060'), ('color', 'white')]}])\n return styled_table", "def _finalize(self, sampled_data):\n final_data = dict()\n for table_name, table_rows in sampled_data.items():\n parents = self.metadata.get_parents(table_name)\n if parents:\n for parent_name in parents:\n foreign_key = self.metadata.get_foreign_key(parent_name, table_name)\n if foreign_key not in table_rows:\n parent_ids = self._find_parent_ids(table_name, parent_name, sampled_data)\n table_rows[foreign_key] = parent_ids\n\n reversed_data = self.metadata.reverse_transform(table_name, table_rows)\n\n fields = self.metadata.get_fields(table_name)\n\n final_data[table_name] = reversed_data[list(fields.keys())]\n\n return final_data", "def table_summary():\n \n t = dict()\n t['name'] = get_names()\n t['Name'] = [get_properties(name)['label'] for name in t['name']]\n N = len(t['name'])\n \n # host\n t['host'] = ['Sagittarius', 'Sagittarius', 'none', 'Gaia-Sausage-Enceladus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Sequoia / Arjuna', np.nan, np.nan, 'Sequoia / Arjuna', 'Gaia-Sausage-Enceladus', 'Sequoia / Arjuna', 'Helmi / Wukong', 'Helmi / Wukong', 'Sagittarius', 'in situ / Helmi / Wukong', 'Helmi / Wukong', 'Cetus', 'Cetus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Cetus', 'Sequoia / Arjuna / I\\'itoi']\n \n # progenitor\n t['progenitor'] = [np.nan, np.nan, 'itself', 'NGC 5139', 'NGC 4590', np.nan, 'NGC 3201', '(Wukong / Helmi)', '(Wukong / Helmi)', np.nan, np.nan, np.nan, np.nan, 'NGC 5024', np.nan, 'NGC 5272', 'NGC 5024', 'NGC 5824', 'NGC 5824', np.nan, np.nan, np.nan, np.nan]\n \n # progenitor type\n t['type'] = ['DG' if name in ['elqui', 'indus', 'jhelum'] else 'GC' for name in t['name']]\n \n # metallicity\n t['feh'] = [-2.4, -2.4, -2.2, -1.5, -2.16, -2.3, -1.5, -2.1, -2.1, -1.6, -1.95, -1.6, -2.7, np.nan, -1.7, -1.1, -2.7, -1.9, np.nan, np.nan, -2.2, np.nan, -1.9]\n \n # associations\n t['friends'] = ['ATLAS', 'Aliqa Uma', np.nan, np.nan, np.nan, np.nan, np.nan, 'Jhelum', 'Indus', np.nan, np.nan, np.nan, np.nan, 'Sylgr', np.nan, np.nan, 'Ravi', 'Turbio', 'Triangulum', np.nan, np.nan, np.nan, np.nan]\n \n tout = Table(t)\n tout.pprint()\n tout.write('../data/stream_origin.fits', overwrite=True)", "def reformat_data(self, df, ids):\n data = np.zeros((len(ids), self.n_sample_rows + 1, self.n_features))\n idx = 0\n for i in ids:\n sample = df.loc[i]\n data[idx, 0:89, :] = sample.values\n data[idx, 89, :] = np.mean(sample.values)\n idx += 1\n return data", "def writeTotalIndex(map_data):\n ids = map_data['id']\n index = [0.0] * len(ids)\n\n colnames = ['cars', 'bikes', 'ages', 'parking', 'male_singles',\n 'female_singles', 'digging', 'freeparking']\n weights = [-0.5, 0.5, 0.1, -0.5, 1.0, 1.0, -1.0, 0.25]\n for colname, weight in zip(colnames, weights):\n values = map_data[colname]\n\n index += values * (weight / values.max())\n\n toJson('final', pd.DataFrame({'id': ids, 'counts': index}))", "def build_fisher_contingency_table(overlap_count, user_count, gene_count, count):\n table = np.zeros(shape=(2, 2))\n table[0, 0] = overlap_count\n table[0, 1] = user_count - overlap_count\n table[1, 0] = gene_count - overlap_count\n table[1, 1] = count - user_count - gene_count + overlap_count\n\n return table", "def 
per_sample_taxa_summaries(open_table, output_format):\n t = parse_biom_table(open_table)\n header = \"#taxon\\trelative_abundance\\n\"\n\n for v, id_, md in t.iter():\n with open(output_format % id_, 'w') as f:\n f.write(header)\n\n for sorted_v, taxa in \\\n sorted(zip(v, t.ids(axis='observation')))[::-1]:\n if sorted_v:\n f.write(\"%s\\t%f\\n\" % (taxa, sorted_v))", "def make_table(ranked_means):\n fp = open(\"table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c|}\n \\\\hline\n \\\\multicolumn{2}{|c||}{Slowest} & \\\\multicolumn{2}{|c|}{Fastest} \\\\\\\\ \\\\hline\n Feature & Rate & Feature & Rate \\\\\\\\ \\\\hline\n\"\"\")\n top_10 = ranked_means[0:10]\n bottom_10 = ranked_means[-10:]\n for ((f_rate, f_name),(s_rate,s_name)) in zip(top_10, bottom_10):\n f_name = f_name.split(\":\")[-1]\n f_name = f_name.rsplit(\" \", 1)[0] if f_name.endswith(\"(V)\") else f_name\n s_name = s_name.split(\":\")[-1]\n s_name = s_name.rsplit(\" \", 1)[0] if s_name.endswith(\"(V)\") else s_name\n fp.write(\" %s & %.2f & %s & %.2f \\\\\\\\ \\n\" % \\\n (f_name, f_rate, s_name, s_rate))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()\n\n fp = open(\"supp_meaning_table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c||l|c||l|c|}\n \\\\hline\n Meaning & Category & Meaning & Category & Meaning & Category & Meaning & Category\\\\\\\\ \\\\hline\n\n\"\"\")\n feature_names = [f.split(\":\")[-1] for (r,f) in ranked_means]\n feature_names.sort(key=lambda s: s.lower())\n col1 = feature_names[0:25]\n col2 = feature_names[25:50]\n col3 = feature_names[50:75]\n col4 = feature_names[75:]\n for a,b,c,d in zip(col1,col2,col3,col4):\n x,y,z,w = [get_meaning_category(i) or \"Excluded\" for i in (a,b,c,d)]\n # Lop off (V)s (we needed them above for get_meaning_category to work)\n a,b,c,d = [f.rsplit(\" \", 1)[0] if f.endswith(\"(V)\") else f for f in (a,b,c,d)]\n fp.write(\"%s & %s & %s & %s & %s & %s & %s & %s\\\\\\\\ \\n\" % (a, x, b, y, c, z, d, w))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()", "def build_table(type_, test_type, device_name, thresholds):\n x = PrettyTable() \n x.field_names = [device_name] + thresholds\n \n \"Chrome,\" + test_type + \",\" + str(notAfter_date) + \",\" + thresholds[index], \",fail\"\n \n ##read all Chromep entries\n ##get all test_type rows\n ##loop rows\n ##show table", "def summarize_as_table(self):\n h = human_readable_size\n h_throughput = human_readable_throughput\n table = [\n ['Total Time (seconds)', '%.3f' % self.total_time,\n self.std_dev_total_time],\n ['Maximum Memory', h(self.max_memory), h(self.std_dev_max_memory)],\n ['Maximum CPU (percent)', '%.1f' % self.max_cpu,\n self.std_dev_max_cpu],\n ['Maximum Sent Throughput', h_throughput(self.max_sent_throughput),\n h_throughput(self.max_sent_throughput)],\n ['Maximum Recv Throughput', h_throughput(self.max_recv_throughput),\n h_throughput(self.max_recv_throughput)],\n ['Average Memory', h(self.average_memory),\n h(self.std_dev_average_memory)],\n ['Average CPU (percent)', '%.1f' % self.average_cpu,\n self.std_dev_average_cpu],\n ['Average Sent Throughput',\n h_throughput(self.average_sent_throughput),\n h_throughput(self.average_sent_throughput)],\n ['Average Recv Throughput',\n h_throughput(self.average_recv_throughput),\n h_throughput(self.average_recv_throughput)],\n ]\n return tabulate(\n table,\n headers=[\n 'Metric over %s run(s)' % (self.total_files),\n 'Mean',\n 'Standard Deviation'\n ],\n tablefmt=\"grid\"\n )", "def fill_table(table, 
keep_unknowns=False):\n if not keep_unknowns:\n table.drop(table.index[table.phylum == \"unknown\"], inplace=True)\n table.drop(table.index[table.genus == \"unknown\"], inplace=True)\n table.drop(table.index[table.eggNOG == \"unknown\"], inplace=True)\n for cohort in table[\"cohort_origin\"].unique():\n cohort_table = table.loc[table[\"cohort_origin\"] == cohort]\n\n if cohort not in table_summary:\n table_summary[cohort] = {}\n\n if \"N\" not in table_summary[cohort]:\n table_summary[cohort][\"N\"] = 1\n else:\n table_summary[cohort][\"N\"] += 1\n\n if \"phylum\" not in table_summary[cohort]:\n table_summary[cohort][\"phylum\"] = pd.DataFrame(columns=[\"phylum\"])\n\n #count the number of unique values in the phylum column\n table_phylum = cohort_table.loc[table['genus'] != \"unknown\",['phylum']].apply(pd.Series.value_counts,axis=0)\n table_summary[cohort][\"phylum\"] = table_summary[cohort][\"phylum\"].add(table_phylum,fill_value=0)\n \n if \"genus\" not in table_summary[cohort]:\n table_summary[cohort][\"genus\"] = pd.DataFrame(columns=[\"genus\"])\n\n #count the number of unique values in the genus column\n table_genus = cohort_table.loc[table['genus'] != \"unknown\",['genus']].apply(pd.Series.value_counts,axis=0)\n table_summary[cohort][\"genus\"] = table_summary[cohort][\"genus\"].add(table_genus,fill_value=0)\n \n if \"eggNOG\" not in table_summary[cohort]:\n table_summary[cohort][\"eggNOG\"] = pd.DataFrame(columns=[\"eggNOG\"])\n\n index = 0\n cogs_table = {}\n\n #forced to do this by each row because one gene belongs to multiple cogs\n for row in cohort_table[\"eggNOG\"]:\n if \"gene_cog\" not in table_summary[cohort]:\n table_summary[cohort][\"gene_cog\"] = 0\n table_summary[cohort][\"gene_cog\"] += 1\n if \";\" in row:\n cogs = row.split(\";\")\n for cog in cogs:\n if cog not in cogs_table:\n cogs_table[cog] = 0\n cogs_table[cog] += 1\n else:\n if row not in cogs_table:\n cogs_table[row] = 0\n cogs_table[row] += 1\n\n #create data frame and create the table\n cog_pd = pd.DataFrame.from_dict(cogs_table, orient=\"index\")\n cog_pd.columns = [\"eggNOG\"]\n table_summary[cohort][\"eggNOG\"] = table_summary[cohort][\"eggNOG\"].add(cog_pd,fill_value=0)", "def get_summary_of_records(self):\n ids = self.get_saleman_ids()\n table = [\n [\"Seller name\",\"Number of sales\",\"Total Value ($)\"]\n ]\n for id in ids:\n table_id = [self.get_seller_name(id),self.get_number_of_sales(id),\n self.get_total_of_saleman(id)]\n table.append(table_id)\n data_table = AsciiTable(table)\n print(data_table.table)", "def build_table():\n with contextlib.ExitStack() as stack:\n files = [stack.enter_context(gzip.open(f, 'rt')) for f in sys.argv[1:]]\n iters = [(line.split() for line in f) for f in files]\n for it in iters:\n next(it)\n key = operator.itemgetter(0)\n table = []\n for k, g in itertools.groupby(merge(*iters, key=key), key=key):\n props = list(g)\n if len(props) == len(iters):\n table.append([k] + [x[1] for x in props])\n for snp in table:\n print(*snp)", "def count_instances(tbl, col2count, colcounted):\n counted_ser = tbl[col2count].value_counts()\n counted_df = pd.DataFrame(counted_ser, columns=[colcounted]).reset_index()\n counted_df.rename(columns={'index':col2count},inplace=True)\n tbl = tbl.merge(counted_df,on=col2count)\n return tbl", "def flatten(counts):\n single_names = {}\n long_names = {}\n for i in range(len(counts.items())):\n if(len(counts.items()[i][0].split(\" \")) <= 1):\n single_names[str(counts.items()[i][0])] = counts.items()[i][1]\n else:\n 
long_names[str(counts.items()[i][0])] = counts.items()[i][1]\n \n starter_list = [[[x[0]],x[1]] for x in long_names.items()]\n for i in range(len(single_names.items())):\n matched = False\n for j in range(len(starter_list)):\n if(single_names.items()[i][0] in starter_list[j][0][0].split(\" \")):\n starter_list[j][0].append(single_names.items()[i][0])\n starter_list[j][1] += single_names.items()[i][1]\n matched = True\n break\n \n if(matched == False):\n starter_list.append([[single_names.items()[i][0]], single_names.items()[i][1]]) \n \n \n return starter_list", "def count_prdctr_freqs(results, column_out):\n \n from collections import Counter\n \n c = Counter()\n for itm in results: \n item = itm\n if(isnumber(item)): \n item = [item]\n \n c.update(Counter(item))\n \n df_out = pd.DataFrame(list(dict(c).items()), columns=[column_out, 'Frequency (%)'])\n df_out = df_out.sort_values(by=['Frequency (%)'], ascending=False)\n df_out['Frequency (%)'] = (df_out['Frequency (%)']/results.shape[0])*100.\n \n return df_out", "def _load_sample_table(self):\n self.sampleTable = pd.read_table(self.config['sampletable'], sep='\\t', dtype=str)\n self.sampleTable.set_index('sampleID', inplace=True)\n self.samples = self.sampleTable.reset_index().to_dict('records')", "def column_creator(path):\n if not os.path.exists(path+'tables'):\n os.makedirs(path+'tables')\n\n\n # Sequences\n if os.path.exists(path+'SEQ.txt'):\n with open(os.path.join(path+'SEQ.txt')) as f1, open(os.path.join(path+'tables/sequences_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n # Modifications\n if os.path.exists(path + 'modifications.txt'):\n\n with open(os.path.join(path+'modifications.txt')) as f1, open(os.path.join(path+'tables/modifications_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n # Spectrum identify:\n if os.path.exists(path + 'spectrum_identify.txt'):\n\n with open(os.path.join(path+'spectrum_identify.txt')) as f1, open(path+'tables/spectrum_ide_table.txt', 'a') as f3:\n lines1 = f1.read().count('\\n')\n f3.write(\"%s\\n%s\\n\" % (\"Spectrum Number\",lines1))\n f1.close()\n f3.close()\n\n if os.path.exists(path + 'spectrum_unidentify.txt'):\n with open(os.path.join(path+'spectrum_unidentify.txt')) as f2, open(path+'tables/spectrum_unide_table.txt', 'a') as f3:\n lines2 = f2.read().count('\\n')\n f3.write(\"%s\\n%s\\n\" % (\"Spectrum Number\",lines2))\n f2.close()\n f3.close()\n\n if os.path.exists(path+'taxonomy_identify.txt'):\n # Taxonomy ide:\n with open(os.path.join(path+'taxonomy_identify.txt')) as f1, open(os.path.join(path+'tables/taxonomy_ide_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n\n if os.path.exists(path + 'taxonomy_unidentify.txt'):\n # Taxonomy unide:\n with open(os.path.join(path+'taxonomy_unidentify.txt')) as f1, open(os.path.join(path+'tables/taxonomy_unide_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()", "def stack_table(A: pd.DataFrame) -> pd.DataFrame:\r\n A = pd.DataFrame(A.stack(dropna=False))\r\n A.columns = ['factor']\r\n return A", "def tabulate(table):\n cw = {} # column widths\n\n # Trim leading and trailing whitespace from each element.\n for i, row in enumerate(table):\n for j, element in 
enumerate(row):\n table[i][j] = element.strip()\n\n # Find the max element width for each column.\n for row in table:\n for j, element in enumerate(row):\n cw[j] = max(cw.get(j, 0), len(element))\n\n # Reformat elements to align columns.\n for i, row in enumerate(table):\n for j, element in enumerate(row):\n table[i][j] = ' ' + element.ljust(cw[j]) + ' '", "def format(self, table):\n #return table.data.to_json()\n m = table.as_array()\n rank = len(m.shape)\n is_table = len(table.headers)<=5 or (len(table.headers)>5 and (table.headers[0] != '0' or table.headers[1] != '1' or table.headers[2] != '2' ))\n\n if rank<3 and is_table:\n v = []\n for i in range(len(table.headers)):\n vv = {\n 'offset': table.offset,\n 'header': table.headers[i],\n 'type': table.types[i],\n 'data': _replace_nans(m[:,i].tolist()) if rank>1 else _replace_nans(m.tolist()),\n }\n if table.sizes is not None:\n vv[\"size\"] = table.sizes[0]\n v.append(vv)\n else:\n # if hasattr(data, \"strip\") or \\\n # (not hasattr(data, \"__getitem__\") and \\\n # not hasattr(data, \"__iter__\")):\n # # data is not a list/tuple => wrap it\n # data = [ data ]\n v = {\n 'offset': table.offset,\n #'headers': table.headers,\n 'type': table.types[0],\n 'data': _replace_nans(m.tolist()),\n }\n if table.sizes is not None:\n v[\"size\"] = table.sizes\n\n return json.dumps(v, cls=ExtEncoder)", "def _formatsensorinfo():\n df = pd.DataFrame(columns=['Sensor', 'Resolution (m)', 'Band Count'])\n for i, (image, key) in enumerate(_sensor_info().items()):\n df.loc[i] = [image, key['resolution'], key['band_count']]\n\n return df", "def column_stats(self, df, prefix='', ignore=None):\n if ignore is None:\n ignore = {}\n\n ignore = ignore.union({'uuid', 'kf_id', 'created_at', 'modified_at',\n 'external_id'})\n TABLES = {}\n FIGURES = {}\n\n for col in set(df.columns) - ignore:\n counts = (df.groupby(col)[col]\n .count()\n .sort_values(ascending=False))\n if len(counts)> 0:\n f = plt.figure()\n counts.plot(kind='bar')\n plt.title(col)\n plt.tight_layout()\n f.savefig(self.output+'figures/{}.png'.format(col))\n plt.close(f)\n FIGURES[col] = self.output+'figures/{}.png'.format(col)\n\n TABLES[col] = pd.DataFrame(counts.values, counts.index, columns=['count'])\n TABLES[col].to_csv(self.output+'tables/{}{}.csv'\n .format(prefix+'_' if prefix else '', col))\n TABLES[col] = TABLES[col].reset_index().to_html(index=False)\n\n return FIGURES, TABLES", "def result_to_dataframe(data):\n letters, statistics = zip(*data)\n dataframe = pd.DataFrame(data=list(statistics), index=letters, columns=['SUM', 'SUM_OF_SQUARES', 'MAX', 'MIN', 'COUNT']).sort_index()\n dataframe['MEAN'] = dataframe['SUM'] / dataframe['COUNT']\n dataframe['VARIANCE'] = dataframe['SUM_OF_SQUARES'] / dataframe['COUNT'] - dataframe['MEAN']**2\n dataframe['STANDARD_DEVIATION'] = dataframe['VARIANCE']**0.5\n logging.info(\"Total datapoints read: {}.\".format(dataframe['COUNT'].sum()))\n return dataframe", "def pretty(self):\n #table = [\"\".join([\"%8s \" % s for s in self.alpha.getSymbols()])]\n table = []\n for row in PWM.getFreq(self):\n table.append(\"\".join([\"%8.6f \" % y for y in row]))\n return table", "def to_countvectors(self):\n if hasattr(self, \"ifp\"):\n df = self.to_dataframe()\n return to_countvectors(df)\n raise AttributeError(\"Please use the `run` method before\")", "def obj_value_counts(df):\n df_obj = obj_df(df)\n for col in df_obj.columns:\n print(df_obj[col].value_counts())\n print('-'*100)", "def expand_counts(counts):\n result = []\n for i, c in enumerate(counts):\n 
result.append(zeros(c, int) + i)\n return concatenate(result)", "def _prepare_stats_table(self, pinfos):\n\n stats_tbl = OrderedDict()\n stats_tbl[\"Title\"] = OrderedDict()\n for res in self.rsts:\n stats_tbl[res.reportid] = OrderedDict()\n\n for pinfo in pinfos:\n for colname in (pinfo[\"colname\"], pinfo[\"xcolname\"]):\n if colname in stats_tbl[\"Title\"]:\n continue\n\n # Each column name is represented by a row in the statistics table. Fill the \"Title\"\n # column.\n title_dict = stats_tbl[\"Title\"][colname] = OrderedDict()\n defs = self._refdefs.info[colname]\n\n if defs.get(\"unit\") == \"nanosecond\":\n # Convert nanoseconds to microseconds.\n unit = \"us\"\n else:\n unit = defs.get(\"short_unit\", \"\")\n\n title_dict[\"colname\"] = colname\n if unit:\n title_dict[\"colname\"] += f\", {unit}\"\n title_dict[\"coldescr\"] = defs[\"descr\"]\n\n title_dict[\"funcs\"] = OrderedDict()\n for funcname in self._stats_funcs:\n if funcname in self.rsts[0].cstats[colname]:\n title_dict[\"funcs\"][funcname] = RORawResult.get_stat_func_descr(funcname)\n\n # Now fill the values for each result.\n for res in self.rsts:\n res_dict = stats_tbl[res.reportid][colname] = OrderedDict()\n res_dict[\"funcs\"] = OrderedDict()\n\n for funcname in title_dict[\"funcs\"]:\n val = res.cstats[colname][funcname]\n fmt = \"{}\"\n if defs.get(\"unit\") == \"nanosecond\" and \"index\" not in funcname:\n val /= 1000\n fmt = \"{:.2f}\"\n if defs[\"type\"] == \"float\":\n fmt = \"{:.2f}\"\n\n fdict = res_dict[\"funcs\"][funcname] = OrderedDict()\n fdict[\"val\"] = fmt.format(val)\n fdict[\"raw_val\"] = val\n\n if self._refres.reportid == res.reportid:\n fdict[\"hovertext\"] = \"This is the reference result, other results \" \\\n \"are compared to this one.\"\n continue\n\n ref_fdict = stats_tbl[self._refres.reportid][colname][\"funcs\"][funcname]\n change = val - ref_fdict[\"raw_val\"]\n if ref_fdict[\"raw_val\"]:\n percent = (change / ref_fdict[\"raw_val\"]) * 100\n else:\n percent = change\n change = fmt.format(change) + unit\n percent = \"{:.1f}%\".format(percent)\n fdict[\"hovertext\"] = f\"Change: {change} ({percent})\"\n\n return stats_tbl", "def parameter_count_table(model: nn.Module, max_depth: int = 3) -> str:\n count: typing.DefaultDict[str, int] = parameter_count(model)\n # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.\n param_shape: typing.Dict[str, typing.Tuple] = {\n k: tuple(v.shape) for k, v in model.named_parameters()\n }\n\n # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.\n table: typing.List[typing.Tuple] = []\n\n def format_size(x: int) -> str:\n if x > 1e8:\n return \"{:.1f}G\".format(x / 1e9)\n if x > 1e5:\n return \"{:.1f}M\".format(x / 1e6)\n if x > 1e2:\n return \"{:.1f}K\".format(x / 1e3)\n return str(x)\n\n def fill(lvl: int, prefix: str) -> None:\n if lvl >= max_depth:\n return\n for name, v in count.items():\n if name.count(\".\") == lvl and name.startswith(prefix):\n indent = \" \" * (lvl + 1)\n if name in param_shape:\n table.append((indent + name, indent + str(param_shape[name])))\n else:\n table.append((indent + name, indent + format_size(v)))\n fill(lvl + 1, name + \".\")\n\n table.append((\"model\", format_size(count.pop(\"\"))))\n fill(0, \"\")\n\n old_ws = tabulate.PRESERVE_WHITESPACE\n tabulate.PRESERVE_WHITESPACE = True\n tab = tabulate.tabulate(\n table, headers=[\"name\", \"#elements or shape\"], tablefmt=\"pipe\"\n )\n tabulate.PRESERVE_WHITESPACE = old_ws\n return tab", "def make_lof_table(data_table, my_genes, 
my_samples, summary_func):\n table_header = [\"Gene\"] + my_samples + [\n \"Missense:Benign\", \"Missense:Possibly\", \"Missense:Probably\",\n \"MissenseNA\", \"Indel\", \"Nonsense\", \"Frameshift\", \"Splice-site\",\n \"Synonymous\"]\n table_records = []\n\n gs_lookup = group_data_by_gs(data_table)\n for gene in my_genes:\n synonymous = missense_benign = missense_possibly = missense_probably = \\\n missense_na = frameshift = nonsense = splice = indel = 0\n\n out_row = [gene]\n for sample in my_samples:\n normalized = [0]\n # Count mutations of each type for this gene and sample\n for entry in gs_lookup[gene][sample]:\n if entry['muttype'] == 'Silent':\n synonymous += 1\n continue\n if entry['muttype'] == 'Intron':\n # Shouldn't be here; ignore\n continue\n\n if entry['muttype'] == 'Missense_Mutation':\n if entry['consequence'] == 'benign':\n missense_benign += 1\n elif entry['consequence'] == 'possibly':\n missense_possibly += 1\n elif entry['consequence'] == 'probably':\n missense_probably += 1\n elif entry['consequence'] == 'NA':\n missense_na += 1\n else:\n print(\"Unhandled missense consequence level:\",\n entry['consequence'], file=sys.stderr)\n elif entry['muttype'] == 'Nonsense_Mutation':\n nonsense += 1\n elif entry['muttype'] == 'Splice_Site':\n splice += 1\n elif entry['muttype'] in ('Frame_Shift_Ins', 'Frame_Shift_Del'):\n frameshift += 1\n elif entry['muttype'] in ('In_Frame_Ins', 'In_Frame_Del'):\n indel += 1\n else:\n print(\"Unhandled mutation type:\", entry['muttype'],\n file=sys.stderr)\n continue\n\n normalized.append(entry['normalized'])\n # Summarize the normalized mutation counts for this gene and sample\n out_row.append(summary_func(normalized))\n out_row.extend((missense_benign, missense_possibly, missense_probably,\n missense_na, indel, nonsense, frameshift, splice,\n synonymous))\n table_records.append(out_row)\n\n return pandas.DataFrame.from_records(table_records, columns=table_header)", "def simple_format_table(table):\n s = [[str(e) for e in row] for row in table]\n lens = [max(map(len, col)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n return '\\n'.join(table)", "def print_statistics(samples, values=None, sample_labels=None, value_labels=None):\n num_vars, nsamples = samples.shape\n if values is None:\n values = np.empty((nsamples, 0))\n if values.ndim == 1:\n values = values[:, np.newaxis]\n num_qoi = values.shape[1]\n assert nsamples == values.shape[0]\n if sample_labels is None:\n sample_labels = ['z%d' % ii for ii in range(num_vars)]\n if value_labels is None:\n value_labels = ['y%d' % ii for ii in range(num_qoi)]\n data = [(label, s) for s, label in zip(samples, sample_labels)]\n data += [(label, s) for s, label in zip(values.T, value_labels)]\n\n # data = [(label, s) for s, label in zip(samples, sample_labels)]\n # data += [(label, s) for s, label in zip(values.T, value_labels)]\n # data = dict(data)\n # df = DataFrame(index=np.arange(nsamples), data=data)\n # print(df.describe())\n\n str_format = ' '.join([\"{:<6}\"]+[\"{:^10}\"]*(len(data)))\n print(str_format.format(*([\" \"]+[dat[0] for dat in data])))\n stat_funs = [lambda x: x.shape[0], lambda x: x.mean(), lambda x: x.std(),\n lambda x: x.min(), lambda x: x.max()]\n stat_labels = [\"count\", \"mean\", \"std\", \"min\", \"max\"]\n str_format = ' '.join([\"{:<6}\"]+[\"{:10.6f}\"]*(len(data)))\n for stat_fun, stat_label in zip(stat_funs, stat_labels):\n print(str_format.format(\n *([stat_label]+[stat_fun(dat[1]) for dat in 
data])))", "def start_table(self):\n self.col_widths = []\n self.result = \"\"", "def test_format_histograms(self):\r\n self.assertEqual(format_histograms(array([0, 1, 0, 2, 2, 3]),\r\n array(\r\n [2, 1, 0, 2, 0, 0]), array(\r\n [0, 0, 0, 2, 0, 1]),\r\n array(\r\n [100, 110, 120, 130, 140, 150, 160])),\r\n \"\"\"# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\nLength\\tRaw\\tBefore\\tAfter\\n100\\t0\\t2\\t0\\n110\\t1\\t1\\t0\\n120\\t0\\t0\\t0\\n130\\t2\\t2\\t2\\n140\\t2\\t0\\t0\\n150\\t3\\t0\\t1\"\"\")", "def summary_table(countries: List[str]):\n \n df_list = []\n \n for country in countries:\n acceleration_figures = acceleration(country)\n pop = COUNTRY_DATA[country]['population']\n df_list.append(\n [\n country,\n COUNTRY_DATA[country]['data'].confirmed[-1],\n int(acceleration_figures[0] * pop),\n COUNTRY_DATA[country]['data'].deaths[-1],\n int(acceleration_figures[1] * pop),\n ]\n )\n\n return df_list", "def get_counts(label, colorby, num_categories, dir_path, level, color_data,\r\n prefs, pref_colors, background_color, label_color, chart_type,\r\n generate_image_type, plot_width, plot_height,\r\n bar_width, dpi, raw_fpath, resize_nth_label, label_type,\r\n include_html_legend, include_html_counts):\r\n\r\n img_data = []\r\n labels = []\r\n level_counts = []\r\n\r\n sample_ids, otu_ids, otu_table = color_data\r\n labels = sample_ids\r\n\r\n # iterate over the counts table and cleanup taxa labels\r\n for idx, counts in enumerate(otu_table):\r\n taxonomy = otu_ids[idx]\r\n split_label = [i for i in taxonomy.strip().split(\";\")]\r\n taxonomy = ';'.join(split_label)\r\n level_counts.append((sum(map(float, counts)), taxonomy,\r\n '<br>'.join(split_label)))\r\n all_sum = sum([c_over[0] for c_over in level_counts])\r\n\r\n # get the fractions for all samples\r\n fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \\\r\n get_fracs(level_counts, num_categories, all_sum,\r\n chart_type, True)\r\n\r\n # if making pie charts we perform a couple extra steps, such as making a\r\n # total pie chart\r\n if chart_type == 'pie':\r\n # make the total pie chart\r\n img_data.extend(\r\n make_HTML_table(label, other_frac, all_sum, red, other_cat,\r\n fracs_labels_other, fracs_labels, dir_path,\r\n all_counts, level, prefs, pref_colors,\r\n background_color, label_color, chart_type,\r\n label, generate_image_type, plot_width,\r\n plot_height, bar_width, dpi, resize_nth_label,\r\n label_type, include_html_legend,\r\n include_html_counts))\r\n\r\n if colorby is not None:\r\n # in the case the user specifies only certain samples we need to\r\n # handle that case\r\n for i, l in enumerate(sample_ids):\r\n if l not in colorby:\r\n continue\r\n total = 0\r\n sample_counts = []\r\n for idx, counts in enumerate(otu_table):\r\n taxonomy = otu_ids[idx]\r\n split_label = [j for j in taxonomy.strip().split(\";\")]\r\n taxonomy = ';'.join(split_label)\r\n c = float(counts[i])\r\n if c > 0:\r\n total += c\r\n sample_counts.append((c, taxonomy,\r\n '<br>'.join(split_label)))\r\n\r\n # get fractions for specific samples\r\n fracs_labels_other, fracs_labels, all_counts,\\\r\n other_cat, red, other_frac = get_fracs(sample_counts,\r\n num_categories,\r\n total, chart_type, True)\r\n\r\n # make the per sample pie charts\r\n img_data.extend(make_HTML_table('_'.join([label, l.strip()]),\r\n other_frac, total, red, other_cat, fracs_labels_other,\r\n fracs_labels, dir_path, all_counts, 
level,\r\n prefs, pref_colors, background_color, label_color,\r\n chart_type, l.strip(\r\n ), generate_image_type,\r\n plot_width, plot_height, bar_width, dpi,\r\n resize_nth_label, label_type, include_html_legend,\r\n include_html_counts))\r\n\r\n # if making an area/bar chart we do not make per sample images, instead\r\n # we make a total chart only\r\n elif chart_type == 'area' or chart_type == 'bar':\r\n area_plot_arr = []\r\n area_plot_sample_ids = []\r\n area_plot_taxa_arr = []\r\n taxa_html = []\r\n total_area_table_out = []\r\n total_sums = []\r\n\r\n if colorby is not None:\r\n # in the case the user specifies only certain samples we need to\r\n # handle that case\r\n for i, l in enumerate(sample_ids):\r\n if l not in colorby:\r\n continue\r\n total = 0\r\n area_plot_sample_ids.append(l)\r\n sample_counts = []\r\n\r\n # iterate over the counts and cleanup taxa for this particular\r\n # fxn\r\n\r\n #add in zip\r\n for idx, counts in enumerate(otu_table):\r\n taxonomy = otu_ids[idx]\r\n split_label = [j for j in taxonomy.strip().split(\";\")]\r\n taxonomy = ';'.join(split_label)\r\n\r\n c = float(counts[i])\r\n total += c\r\n sample_counts.append(\r\n (c, taxonomy, '<br>'.join(split_label)))\r\n\r\n # get fractions for specific samples\r\n fracs_labels_other, fracs_labels, all_counts,\\\r\n other_cat, red, other_frac = get_fracs(sample_counts,\r\n len(sample_counts), total,\r\n chart_type, False)\r\n\r\n total_area_table_out.append(all_counts)\r\n total_sums.append(sum([float(i) for i in all_counts]))\r\n\r\n # get the percents for each taxa and sample\r\n area_plot_per = []\r\n area_plot_taxa = []\r\n for i in fracs_labels_other:\r\n area_plot_per.append(i[1])\r\n area_plot_taxa.append(i[0])\r\n\r\n area_plot_arr.append(area_plot_per)\r\n area_plot_taxa_arr.append(area_plot_taxa)\r\n\r\n # write out the data table html, since it is different than pie chart\r\n # data table\r\n taxa_html.append('<tr><th>' + l.strip() +\r\n '</th></tr>' + ''.join(all_counts) + '')\r\n\r\n data_table = zip(*total_area_table_out)\r\n\r\n # create link for raw data file\r\n data_html_str = '<table><tr class=ntitle><td><a href=\"%s\" target=\"_blank\">View Table (%s)</a></td></tr></table>' % \\\r\n (os.path.join('raw_data', os.path.split(raw_fpath)[-1]),\r\n os.path.splitext(raw_fpath)[-1])\r\n\r\n # create the output table\r\n data_html_str += '<table cellpadding=1 cellspacing=1 border=1 ' + \\\r\n 'style=\\\"text-align:center;border-color:white;' +\\\r\n 'border-style:groove;\\\">' + \\\r\n '<tr class=\\\"ntitle\\\"><td class=\\\"header\\\" colspan=\"2\"></td><td' +\\\r\n ' valign=\\\"bottom\\\" class=\\\"header\\\" colspan=\"2\">Total</td>'\r\n\r\n ct_head_row = '<tr class=ntitle>' + \\\r\n '<td valign=\\\"bottom\\\" ' + \\\r\n 'class=\\\"header\\\">Legend</td><td ' + \\\r\n 'valign=\\\"bottom\\\" class=\\\"header\\\">Taxonomy</td>' + \\\r\n '<td class=\\\"header\\\">count</td><td class=\\\"header\\\">%</td>'\r\n\r\n if not include_html_counts:\r\n # list all samples in the header\r\n for i in area_plot_sample_ids:\r\n data_html_str += '<td valign=bottom class=header>%s</td>' % (i)\r\n ct_head_row += '<td class=\\\"header\\\">%</td>'\r\n else:\r\n # list all samples in the header\r\n for i in area_plot_sample_ids:\r\n data_html_str += '<td colspan=\\\"2\\\" valign=\\\"bottom\\\" class=\\\"header\\\">%s</td>'\\\r\n % (i)\r\n ct_head_row += '<td class=\\\"header\\\">count</td><td class=\\\"header\\\">%</td>'\r\n\r\n data_html_str += '</tr>'\r\n ct_head_row += '</tr>'\r\n data_html_str += ct_head_row\r\n 
table_sum = sum(total_sums)\r\n\r\n # list taxa in first row\r\n for ct, dat in enumerate(otu_ids):\r\n tax = dat\r\n split_label = [i for i in tax.strip().split(\";\")]\r\n split_label[-1] = \"<a href=javascript:gg(\\'%s\\');>%s</a>\" % \\\r\n (split_label[-1].replace(' ', '+'),\r\n split_label[-1].replace(' ', '&nbsp;'))\r\n joined_label = ';'.join(split_label).replace('\"', '')\r\n row_sum = sum([float(i) for i in data_table[ct]])\r\n data_html_str += \"<tr><td class=\\\"normal\\\" bgcolor=\\\"%s\\\">&nbsp;&nbsp;</td><td style=\\\"text-align:left;\\\" class=\\\"normal\\\">%s</td><td class=\\\"normal\\\">%5.0f</td><td class=\\\"normal\\\">%5.1f&#37;</td>\"\\\r\n % (data_colors[pref_colors[tax]].toHex(), joined_label,\r\n row_sum, row_sum / table_sum * 100)\r\n\r\n # add the percent taxa for each sample\r\n for i, per_tax in enumerate(data_table[ct]):\r\n if float(per_tax) > 0:\r\n if not include_html_counts:\r\n data_html_str += '<td class=\\\"normal\\\" style=\\\"border-color:%s;\\\">%5.1f&#37;</td>' %\\\r\n (data_colors[pref_colors[tax]].toHex(),\r\n (float(per_tax) / total_sums[i] * 100))\r\n else:\r\n data_html_str += '<td class=\\\"normal\\\" style=\\\"border-color:%s;\\\">%5.0f</td><td class=\\\"normal\\\" style=\\\"border-color:%s;\\\">%5.1f&#37;</td>' %\\\r\n (data_colors[\r\n pref_colors[tax]].toHex(), float(per_tax),\r\n data_colors[pref_colors[tax]].toHex(),\r\n (float(per_tax) / total_sums[i] * 100))\r\n else:\r\n if not include_html_counts:\r\n data_html_str += '<td class=\\\"normal\\\">%5.1f&#37;</td>' % \\\r\n (float(per_tax) / total_sums[i] * 100)\r\n else:\r\n data_html_str += '<td class=\\\"normal\\\">%5.0f</td><td class=\\\"normal\\\">%5.1f&#37;</td>' % \\\r\n (float(per_tax), float(per_tax)\r\n / total_sums[i] * 100)\r\n\r\n data_html_str += '</tr>'\r\n\r\n data_html_str += '</table>'\r\n\r\n if include_html_counts:\r\n # add a note on the counts since they can be relative or absolute\r\n # values\r\n data_html_str += '<p><em>NOTE: the counts displayed pertain to either relative or absolute values depending on your selection from summarize_taxa.py. For relative values, the numbers are converted to integer, so counts below 0.5 appear as 0. 
Also, if you chose to display numeric data, the table headers may not be in the same order as the plot.</em></p>'\r\n\r\n # make sure that the taxa array is in the proper order\r\n for i in range(len(area_plot_taxa_arr) - 1):\r\n if area_plot_taxa_arr[i] != area_plot_taxa_arr[i + 1]:\r\n raise ValueError('The taxonomies are out of order!')\r\n\r\n # add data to the html output\r\n img_data.extend(make_HTML_table(area_plot_sample_ids,\r\n other_frac, all_sum, red, otu_ids, area_plot_arr,\r\n fracs_labels, dir_path, [' '.join(\r\n taxa_html)], level,\r\n prefs, pref_colors, background_color, label_color, chart_type,\r\n label, generate_image_type,\r\n plot_width, plot_height, bar_width, dpi, resize_nth_label,\r\n label_type, include_html_legend, include_html_counts))\r\n img_data.append(data_html_str)\r\n\r\n return img_data", "def tree2OTU_table(mvp_tree):\n series = []\n for terminal in mvp_tree.feature_tree.get_terminals():\n try:\n series.append(terminal.sample_series)\n except:\n print('there is no sample series in tree2OTU ')\n df = pd.dataframe(series)\n return df", "def prepare_class_freqs(cls_counts, n_classes):\n\n if None in cls_counts:\n return None\n\n lst_cls_counts = []\n\n for party_cls_counts in cls_counts:\n temp = [0] * n_classes\n for label, count in party_cls_counts.items():\n temp[int(label)] = int(count)\n\n lst_cls_counts.append(np.array(temp))\n\n return lst_cls_counts", "def makeTableNamesList(n, ):", "def fastqc_stats_table(self):\n \n headers = OrderedDict()\n headers['percent_duplicates'] = {\n 'title': '% Dups',\n 'description': '% Duplicate Reads',\n 'max': 100,\n 'min': 0,\n 'scale': 'RdYlGn-rev',\n 'format': '{:.1f}%'\n }\n headers['percent_gc'] = {\n 'title': '% GC',\n 'description': 'Average % GC Content',\n 'max': 80,\n 'min': 20,\n 'scale': 'PRGn',\n 'format': '{:.0f}%'\n }\n headers['avg_sequence_length'] = {\n 'title': 'Length',\n 'description': 'Average Sequence Length (bp)',\n 'min': 0,\n 'scale': 'RdYlGn',\n 'format': '{:.0f}'\n }\n headers['total_sequences'] = {\n 'title': 'M Seqs',\n 'description': 'Total Sequences (millions)',\n 'min': 0,\n 'scale': 'Blues',\n 'modify': lambda x: x / 1000000,\n 'shared_key': 'read_count'\n }\n self.general_stats_addcols(self.fastqc_stats, headers)", "def _generate_expected_summary_table():\n expected_summary = SummaryTable()\n # 1 pub/send per default emit period\n expected_summary.increment_pub()\n expected_summary.increment_send()\n return expected_summary" ]
[ "0.57405555", "0.56753397", "0.56595165", "0.56088334", "0.558755", "0.5580088", "0.55345875", "0.55330217", "0.5509548", "0.543421", "0.5379833", "0.5359173", "0.53470004", "0.52842575", "0.52725303", "0.5265805", "0.5256941", "0.52491903", "0.52491903", "0.52352023", "0.52184176", "0.5210343", "0.5186094", "0.5181094", "0.5177044", "0.51626515", "0.5162241", "0.516095", "0.516095", "0.51574534", "0.51536584", "0.5152442", "0.51489735", "0.51471347", "0.51438355", "0.51140195", "0.51136845", "0.51062876", "0.5095472", "0.5079911", "0.50647867", "0.50595415", "0.50550973", "0.5050915", "0.5038118", "0.50340736", "0.50127065", "0.50121963", "0.5000586", "0.49998978", "0.49988917", "0.49951482", "0.49846387", "0.4984482", "0.49806964", "0.4972534", "0.49673676", "0.4966203", "0.4961195", "0.4961049", "0.49516347", "0.49494374", "0.49492437", "0.49413514", "0.49373916", "0.4936244", "0.49353832", "0.49303764", "0.49238023", "0.49222514", "0.49116588", "0.49094725", "0.49001428", "0.4883092", "0.48812306", "0.48653036", "0.48583058", "0.48567784", "0.4851121", "0.48385784", "0.48343903", "0.48337904", "0.4830861", "0.48270935", "0.48222396", "0.4817311", "0.4812682", "0.481219", "0.4809277", "0.48054144", "0.4802383", "0.48004514", "0.4791193", "0.47888666", "0.47797054", "0.4772948", "0.4772133", "0.47721258", "0.47707912", "0.4770469" ]
0.627885
0
reformat fpkm table into flattened table of sample_names/values
def reformat_fpkmTable( self,analysis_id_I=None,sna2experimentID_I=None, sna2sns_I=None): if self.fpkmTable: fpkmTable = self.fpkmTable[:]; else: fpkmTable = []; fpkmTable_flat = self.reformat_countOrFPKMTable( countOrFPKMTable_I=fpkmTable, analysis_id_I=analysis_id_I, sna2experimentID_I=sna2experimentID_I, sna2sns_I=sna2sns_I, count_or_FPKM = 'fpkm'); return fpkmTable_flat;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_unifrac_sample_mapping(sample_ids, otu_ids, otu_table_array):\r\n out = []\r\n for i, row in enumerate(otu_table_array):\r\n for j, val in enumerate(row):\r\n if val > 0:\r\n line = [otu_ids[i], sample_ids[j], str(val)]\r\n out.append('\\t'.join(line))\r\n return out", "def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df", "def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df", "def from_multicsv(self,input_data):\n reformatted_data = []\n for (i,row) in enumerate(input_data):\n if i==0:\n headers = row\n else:\n data_row = {}\n for (j,h) in enumerate(headers):\n if j<len(row):\n data_row.update({h : row[j]})\n else:\n data_row.update({h : 0})\n reformatted_data.append(data_row)\n return reformatted_data", "def convert_to_table_format(package):\n tables = list()\n for primary_table_id in package.primary_table_ids:\n tables.append(StachExtensions.__generate_table(package, primary_table_id))\n return tables", "def print_tables(hash_table, f_output, l_samples):\n\n l_fields = ['chr', 'pos', 'ref', 'alt', 'QUAL', 'FILTER',\n 'Func.refGene', 'Gene.refGene', 'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene',\n 'cytoBand', 'ExAC_ALL', 'ExAC_AFR', 'ExAC_AMR', 'ExAC_EAS', 'ExAC_FIN', 'ExAC_NFE', 'ExAC_OTH',\n 'ExAC_SAS',\n 'avsnp147', 'SIFT_score', 'SIFT_pred', 'Polyphen2_HDIV_score', 'Polyphen2_HDIV_pred',\n 'Polyphen2_HVAR_score',\n 'Polyphen2_HVAR_pred', 'LRT_score', 'LRT_pred', 'MutationTaster_score', 'MutationTaster_pred',\n 'MutationAssessor_score', 'MutationAssessor_pred', 'FATHMM_score', 'FATHMM_pred', 'PROVEAN_score',\n 'PROVEAN_pred', 'VEST3_score', 'CADD_raw', 'CADD_phred', 'DANN_score', 'fathmm-MKL_coding_score',\n 'fathmm-MKL_coding_pred', 'MetaSVM_score', 'MetaSVM_pred', 'MetaLR_score', 'MetaLR_pred',\n 'integrated_fitCons_score', 'integrated_confidence_value', 'GERP++_RS', 'phyloP7way_vertebrate',\n 'phyloP20way_mammalian', 'phastCons7way_vertebrate', 'phastCons20way_mammalian', 'SiPhy_29way_logOdds']\n l_fields = l_fields + l_samples\n \n l_chr = set([item[0] for item in hash_table.keys()])\n\n fo = open(f_output, 'w')\n fo.write(','.join(l_fields) + '\\n')\n for key in sorted(hash_table.keys(), key=itemgetter(1)):\n fo.write(','.join(map(lambda field: hash_table[key].get(field, '.'), l_fields)) + '\\n')\n fo.close()", "def tree2OTU_table(mvp_tree):\n series = []\n for terminal in mvp_tree.feature_tree.get_terminals():\n try:\n series.append(terminal.sample_series)\n except:\n print('there is no sample series in tree2OTU ')\n df = pd.dataframe(series)\n return df", "def to_frame(self):\n # Create a set of dictionaries/lists for each column\n data = dict([(i_var.name, []) for i_var in self.inputs])\n data.update({self.OUTPUT_LABEL: [], self.INPUT_LABEL: [], self.name: []})\n\n # A very ugly loop to produce all the probabilities in a nice way.\n # Note that this just reproduces what is already in `self.lookup`.\n # Honestly, I just haven't thought of a better way to get nice output.\n for i_index, i_state in enumerate(self.input_states):\n for o_var, results in zip(self.outputs, self.per_state_results):\n for o_state, o_p in enumerate(results[i_index]):\n for i_var, s in zip(self.inputs, i_state):\n data[i_var.name].append(s)\n data[self.OUTPUT_LABEL].append(o_var.name)\n data[self.INPUT_LABEL].append(o_state)\n 
data[self.name].append(o_p)\n all_data = pd.DataFrame(data=data)\n\n # The magnificent pivot table function does all the work\n return pd.pivot_table(data=all_data, values=[self.name],\n index=[i_var.name for i_var in self.inputs],\n columns=[self.OUTPUT_LABEL, self.INPUT_LABEL])", "def test_toTable(self):\r\n # Empty results.\r\n out_f = StringIO()\r\n self.res1.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(),\r\n \"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\\n\")\r\n out_f.close()\r\n\r\n # Results with multiple samples.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\nS1\\t10\\t20\\t2.5\\t2.5\\t3.5\r\nS1\\t20\\t30\\t3.5\\t2.5\\t3.5\r\nS2\\t1\\t3\\t0.4\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res2.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Custom header.\r\n exp = \"\"\"foo\\tbar\\tbaz\\tbazaar\\tbazaaar\\tbazaaaar\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res1.addSample('S1', 42)\r\n self.res1.addSampleEstimate('S1', 5, 21, 1.5, 2.5, 3.5)\r\n self.res1.toTable(out_f,\r\n header=['foo', 'bar', 'baz', 'bazaar', 'bazaaar', 'bazaaaar'])\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Invalid header.\r\n with self.assertRaises(ValueError):\r\n out_f = StringIO()\r\n self.res1.toTable(out_f, header=['foo'])\r\n\r\n # Cells with None as their value.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t43\\tN/A\\tN/A\\tN/A\\tN/A\r\n\"\"\"\r\n out_f = StringIO()\r\n res = RichnessEstimatesResults()\r\n res.addSample('S1', 42)\r\n res.addSampleEstimate('S1', 43, None, None, None, None)\r\n res.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()", "def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))", "def _finalize(self, sampled_data):\n final_data = dict()\n for table_name, table_rows in sampled_data.items():\n parents = self.metadata.get_parents(table_name)\n if parents:\n for parent_name in parents:\n foreign_key = self.metadata.get_foreign_key(parent_name, table_name)\n if foreign_key not in table_rows:\n parent_ids = self._find_parent_ids(table_name, parent_name, sampled_data)\n table_rows[foreign_key] = parent_ids\n\n reversed_data = self.metadata.reverse_transform(table_name, table_rows)\n\n fields = self.metadata.get_fields(table_name)\n\n final_data[table_name] = reversed_data[list(fields.keys())]\n\n return final_data", "def reformat_data(self, df, ids):\n data = np.zeros((len(ids), self.n_sample_rows + 1, self.n_features))\n idx = 0\n for i in ids:\n sample = df.loc[i]\n data[idx, 0:89, :] = sample.values\n data[idx, 89, :] = np.mean(sample.values)\n idx += 1\n return data", "def preprocessKNN(self):\n\n feature_list = []\n\n for index, row in self.all_data.iterrows():\n chans = cv2.split(row['image'])\n\n features = []\n for chan in chans:\n hist = cv2.calcHist(chan, [0], None, [64], [0,256])\n features.extend(hist)\n\n features = np.array(features).flatten()\n feature_list.append(features)\n\n df = self.all_data[['name', 'genre']].copy()\n\n feature_df = 
pd.DataFrame(feature_list)\n\n df = df.join(feature_df)\n\n return df", "def get_normalized_data_table(table_metadata, debug=False):\n suffix = table_metadata.get('suffix', '')\n data_table = table_metadata['table_class'](\n file_path=table_metadata['csv_filename'], suffix=suffix)\n drop_headers(table_metadata['document_label'], data_table.data)\n rename_headers(table_metadata['document_label'], data_table.data)\n print_data_table_length(table_metadata['document_label'],\n data_table.data,\n debug=debug)\n return data_table", "def __profile_to_table(data):\n output = [[\"condition\", \"mean\", \"min\", \"max\"]]\n order = data[\"order\"]\n\n for o in order:\n try:\n values = data[\"data\"][o]\n output.append(\n [o, str(mean(values)), str(min(values)), str(max(values))]\n )\n except Exception as e:\n print(e)\n\n return \"\\n\".join([\"\\t\".join(l) for l in output])", "def _get_table_from_samples(self, index):\n df = pd.DataFrame()\n for sample in self.samples:\n sd = sample.to_dict()\n ser = pd.Series(\n {k: v for (k, v) in list(sd.items()) if not k.startswith(\"_\")}\n )\n df = df.append(ser, ignore_index=True)\n index = [index] if isinstance(index, str) else index\n if not all([i in df.columns for i in index]):\n _LOGGER.debug(\n \"Could not set {} index. At least one of the \"\n \"requested columns does not exist: {}\".\n format(CFG_SAMPLE_TABLE_KEY, index))\n return df\n _LOGGER.debug(\"Setting sample_table index to: {}\".format(index))\n df.set_index(keys=index, drop=False, inplace=True)\n return df", "def test_format_unifrac_sample_mapping(self):\r\n a = [[1, 0, 0], [0, 2, 4], [7, 0, 9.0]]\r\n otu_ids = ['OTUa', 'OTUb', 'OTUc']\r\n sample_ids = ['Sa', 'Sb', 'Sc']\r\n result = format_unifrac_sample_mapping(sample_ids, otu_ids, a)\r\n self.assertEqual(\r\n result,\r\n ['OTUa\\tSa\\t1',\r\n 'OTUb\\tSb\\t2',\r\n 'OTUb\\tSc\\t4',\r\n 'OTUc\\tSa\\t7',\r\n 'OTUc\\tSc\\t9.0'])", "def _format_data(self):\n formatted_data = []\n\n for row in self._data_agg_by_mean_value.iterrows():\n \n car_make = row[0]\n mean_car_value = round(row[1][0], 2)\n formatted_data.append({'car_make': car_make, 'mean_car_value': mean_car_value})\n\n return formatted_data", "def reformat_countOrFPKMTable(\n self,\n countOrFPKMTable_I=None,\n analysis_id_I=None,\n sna2experimentID_I=None,\n sna2sns_I=None,\n count_or_FPKM = 'count'):\n #format into a dictionary of rows for quick aligning with the tracking_id\n countOrFPKMTable_flat = [];\n for row in countOrFPKMTable_I:\n for k,v in row.items():\n if k=='tracking_id':continue;\n tmp = {};\n tmp['analysis_id'] = analysis_id_I;\n tmp['tracking_id'] = row['tracking_id'];\n\n sample_name_lst = k.split('_');\n sample_name_base = '_'.join(sample_name_lst[:-1]);\n sample_name_rep = eval(sample_name_lst[-1]);\n if sna2experimentID_I: \n experiment_id = sna2experimentID_I[sample_name_base];\n else:\n experiment_id=None;\n tmp['experiment_id'] = experiment_id;\n if sna2sns_I: \n sample_name = sna2sns_I[sample_name_base][sample_name_rep];\n else:\n sample_name=k;\n tmp['sample_name'] = sample_name;\n\n tmp['value'] = v;\n tmp['value_units'] = count_or_FPKM;\n tmp['used_'] = True;\n tmp['comment_'] = None;\n countOrFPKMTable_flat.append(tmp);\n return countOrFPKMTable_flat;", "def tsv_samples_to_names(self, name='samples_to_names.tsv'):\n with open(self.a.out_dir + name, 'w') as handle:\n content = self.df_sample_names.to_csv(None, sep=self.sep, float_format=self.float_format)\n handle.writelines(content)", "def transform(filtered_list):\n out_put = {}\n out_list = []\n # loop 
to get the required columns, random ordered\n for item in filtered_list:\n for val in item._fields:\n if val in type_dict:\n out_put[val] = type_dict.get(val)(getattr(item, val))\n out_list.append(out_put)\n out_put = {}\n\n # loop to the ordered columns data as per output\n all_rows = []\n for item in out_list:\n tmp_row = []\n for key in type_dict.keys():\n out_put[key] = item[key]\n tmp_row.append(item[key])\n all_rows.append(tmp_row)\n\n col_row = [col.replace('_', '-') for col in type_dict.keys()]\n all_rows.insert(0, col_row)\n return all_rows", "def per_sample_taxa_summaries(open_table, output_format):\n t = parse_biom_table(open_table)\n header = \"#taxon\\trelative_abundance\\n\"\n\n for v, id_, md in t.iter():\n with open(output_format % id_, 'w') as f:\n f.write(header)\n\n for sorted_v, taxa in \\\n sorted(zip(v, t.ids(axis='observation')))[::-1]:\n if sorted_v:\n f.write(\"%s\\t%f\\n\" % (taxa, sorted_v))", "def generate_table(results):\n keyslist = list(results[0].keys())\n table = PrettyTable(keyslist)\n for dct in results:\n table.add_row([dct.get(c, \"\") for c in keyslist])\n return table", "def _load_sample_table(self):\n self.sampleTable = pd.read_table(self.config['sampletable'], sep='\\t', dtype=str)\n self.sampleTable.set_index('sampleID', inplace=True)\n self.samples = self.sampleTable.reset_index().to_dict('records')", "def transform(input):\n transformed_file = []\n\n for row in input:\n names = row['name'].split()\n row['fname'] = names[0]\n row['lname'] = names[1]\n del row['name']\n transformed_file.append(row)\n return transformed_file", "def table_gen(NamesL_pairs, p_pL, m_mL, p_mL, m_pL, p_valsL, p_vals_BonferoniL, RatiosL, p_valsL_divergent_convergent,\n p_valsL_divergent_convergent_BonferoniL, RatiosL_divergent_convergent, output_table):\n datafile = open(output_table, \"w\")\n datafile.write(\n \"Feature_1\" + '\\t' + \"Feature_2\" + \"\\t\" + \"plus_plus\" + '\\t' + \"minus_minus\" + '\\t' + \"plus_minus\" + '\\t' + \"minus_plus\" + '\\t' + \"p_value_same_opposite\" + '\\t' + \"p-value_same_opposite_Bonferoni_corrected\" + '\\t' + \"Ratio_same_opposite\" + '\\t' + \"p_value_divergent_convergent\" + '\\t' + \"p_value_divergent_convergent Bonferoni corrected\" + '\\t' + \"Ratio divergent convergent\" + '\\n')\n for i in range(len(NamesL_pairs)):\n datafile.write(\n NamesL_pairs[i][0] + '\\t' + NamesL_pairs[i][1] + '\\t' + str(p_pL[i]) + '\\t' + str(m_mL[i]) + '\\t' + str(\n p_mL[i]) + '\\t' + str(m_pL[i]) + '\\t' + str(p_valsL[i]) + '\\t' + str(p_vals_BonferoniL[i]) + '\\t' + str(\n RatiosL[i]) + '\\t' + str(p_valsL_divergent_convergent[i]) + '\\t' + str(\n p_valsL_divergent_convergent_BonferoniL[i]) + '\\t' + str(RatiosL_divergent_convergent[i]) + '\\n')\n datafile.close()\n return", "def split_otu_table_on_sample_metadata(otu_table_f, mapping_f, mapping_field):\r\n mapping_f = list(mapping_f)\r\n mapping_values = get_mapping_values(mapping_f, mapping_field)\r\n otu_table = parse_biom_table(otu_table_f)\r\n\r\n for v in mapping_values:\r\n v_fp_str = v.replace(' ', '_')\r\n sample_ids_to_keep = sample_ids_from_metadata_description(\r\n mapping_f, valid_states_str=\"%s:%s\" % (mapping_field, v))\r\n\r\n try:\r\n filtered_otu_table = otu_table.filterSamples(\r\n lambda values, id_, metadata: id_ in sample_ids_to_keep)\r\n except TableException:\r\n # all samples are filtered out, so no otu table to write\r\n continue\r\n yield v_fp_str, format_biom_table(filtered_otu_table)", "def formatOutput(tranisitionTable):\n \n # TODO: Make all the processing done in 
this function, rather than main\n for key in sorted(transitionTable.iterkeys()):\n transitionTable[key].sort(key = operator.itemgetter(1)) #.sort is in-place\n \n return transitionTable", "def make_table(ranked_means):\n fp = open(\"table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c|}\n \\\\hline\n \\\\multicolumn{2}{|c||}{Slowest} & \\\\multicolumn{2}{|c|}{Fastest} \\\\\\\\ \\\\hline\n Feature & Rate & Feature & Rate \\\\\\\\ \\\\hline\n\"\"\")\n top_10 = ranked_means[0:10]\n bottom_10 = ranked_means[-10:]\n for ((f_rate, f_name),(s_rate,s_name)) in zip(top_10, bottom_10):\n f_name = f_name.split(\":\")[-1]\n f_name = f_name.rsplit(\" \", 1)[0] if f_name.endswith(\"(V)\") else f_name\n s_name = s_name.split(\":\")[-1]\n s_name = s_name.rsplit(\" \", 1)[0] if s_name.endswith(\"(V)\") else s_name\n fp.write(\" %s & %.2f & %s & %.2f \\\\\\\\ \\n\" % \\\n (f_name, f_rate, s_name, s_rate))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()\n\n fp = open(\"supp_meaning_table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c||l|c||l|c|}\n \\\\hline\n Meaning & Category & Meaning & Category & Meaning & Category & Meaning & Category\\\\\\\\ \\\\hline\n\n\"\"\")\n feature_names = [f.split(\":\")[-1] for (r,f) in ranked_means]\n feature_names.sort(key=lambda s: s.lower())\n col1 = feature_names[0:25]\n col2 = feature_names[25:50]\n col3 = feature_names[50:75]\n col4 = feature_names[75:]\n for a,b,c,d in zip(col1,col2,col3,col4):\n x,y,z,w = [get_meaning_category(i) or \"Excluded\" for i in (a,b,c,d)]\n # Lop off (V)s (we needed them above for get_meaning_category to work)\n a,b,c,d = [f.rsplit(\" \", 1)[0] if f.endswith(\"(V)\") else f for f in (a,b,c,d)]\n fp.write(\"%s & %s & %s & %s & %s & %s & %s & %s\\\\\\\\ \\n\" % (a, x, b, y, c, z, d, w))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()", "def print_tsv(data, filename):\n with open(filename, 'wt') as fout:\n writefile = partial(print, sep='\\t', file=fout)\n writefile('Sample', *expected_header)\n for sample in data:\n for entry in data[sample]:\n writefile(sample, *(entry[field] for field in expected_header))", "def matrix2Table(self, matrix): \n M = TableModel()\n M.addColumn('Mutations')\n\n fields = matrix.columnHeaders()\n for f in fields:\n M.addColumn(f)\n i = matrix.indexOfColumnWithHeader('Mutations')\n for row in matrix:\n mutationSet = Core.Data.MutationSet(row[i])\n code = '+'.join(mutationSet.mutationCodes(reduced=True))\n M.addRow(code)\n for f in fields:\n j = matrix.indexOfColumnWithHeader(f)\n if f == 'Mutations':\n M.data[code]['Mutations'] = code\n else: \n M.data[code][f] = str(row[j])\n return M", "def format(self, table):\n #return table.data.to_json()\n m = table.as_array()\n rank = len(m.shape)\n is_table = len(table.headers)<=5 or (len(table.headers)>5 and (table.headers[0] != '0' or table.headers[1] != '1' or table.headers[2] != '2' ))\n\n if rank<3 and is_table:\n v = []\n for i in range(len(table.headers)):\n vv = {\n 'offset': table.offset,\n 'header': table.headers[i],\n 'type': table.types[i],\n 'data': _replace_nans(m[:,i].tolist()) if rank>1 else _replace_nans(m.tolist()),\n }\n if table.sizes is not None:\n vv[\"size\"] = table.sizes[0]\n v.append(vv)\n else:\n # if hasattr(data, \"strip\") or \\\n # (not hasattr(data, \"__getitem__\") and \\\n # not hasattr(data, \"__iter__\")):\n # # data is not a list/tuple => wrap it\n # data = [ data ]\n v = {\n 'offset': table.offset,\n #'headers': table.headers,\n 'type': 
table.types[0],\n 'data': _replace_nans(m.tolist()),\n }\n if table.sizes is not None:\n v[\"size\"] = table.sizes\n\n return json.dumps(v, cls=ExtEncoder)", "def preprocess_feature(df):", "def to_tsv(self, out_dir, sep=\"\\t\", prefix=None, **kwargs):\n os.makedirs(out_dir, exist_ok=True) # create dirs if non-existent\n prefix = f\"{prefix}_\" if prefix else \"\"\n fpaths = [\n os.path.join(out_dir, f\"{prefix}{suf}.tsv\")\n for suf in [\"data\", \"sample_meta\"]\n ]\n self.data.to_csv(fpaths[0], sep=\"\\t\", **kwargs)\n self.sample_meta.to_csv(fpaths[1], sep=\"\\t\", **kwargs)", "def prepare_batch_sample_set_for_metadata_export(path, tsca_id):\n raw = pd.read_table(path)\n print( \"%d Samples in this batch\" % raw.shape[0] )\n\n # Create dfs to upload\n all_samples = pd.concat([pd.DataFrame(index=raw.index, columns=['membership:sample_set_id'], data=tsca_id), \\\n raw[ ['sample_id', 'sample_type'] ]], axis=1)\n\n\n tumors = all_samples.loc[ all_samples['sample_type'] == \"Tumor\", ['membership:sample_set_id', 'sample_id'] ]\n tumors.loc[: , 'membership:sample_set_id'] = \"%s_T\"%tsca_id\n \n normals = all_samples.loc[ all_samples['sample_type'] == \"Normal\", ['membership:sample_set_id', 'sample_id'] ]\n normals.loc[: , 'membership:sample_set_id'] = \"%s_N\"%tsca_id\n\n all_samples = all_samples.drop('sample_type', axis=1)\n return (all_samples, tumors, normals)", "def transform_instances_table_output(result):\n\n table_output = []\n for item in result:\n table_output.append(_transform_instance_row(item))\n\n return table_output", "def build_table():\n with contextlib.ExitStack() as stack:\n files = [stack.enter_context(gzip.open(f, 'rt')) for f in sys.argv[1:]]\n iters = [(line.split() for line in f) for f in files]\n for it in iters:\n next(it)\n key = operator.itemgetter(0)\n table = []\n for k, g in itertools.groupby(merge(*iters, key=key), key=key):\n props = list(g)\n if len(props) == len(iters):\n table.append([k] + [x[1] for x in props])\n for snp in table:\n print(*snp)", "def normalize_table(input_fp, output_fp, relative_abund, presence_absence,\n axis):\n table = load_table(input_fp)\n result = _normalize_table(table, relative_abund, presence_absence, axis)\n\n write_biom_table(result, 'hdf5' if HAVE_H5PY else 'json', output_fp)", "def reverse_transform(self, data):\n table = []\n\n for i in range(self.metadata['num_features']):\n column_data = data['f%02d' % i]\n column_metadata = self.metadata['details'][i]\n\n if column_metadata['type'] == 'value':\n column = self.continous_transformer.inverse_transform(column_data, column_metadata)\n\n if column_metadata['type'] == 'category':\n self.categorical_transformer.classes_ = column_metadata['mapping']\n column = self.categorical_transformer.inverse_transform(\n column_data.ravel().astype(np.int32))\n\n table.append(column)\n\n result = pd.DataFrame(dict(enumerate(table)))\n result.columns = self.columns\n return result", "def summarize_sample_props(psd_list, sample_list):\n prop_list = [psd.sample_props for psd in psd_list]\n cols = ['amplicon median', 'mean size', 'lower size', 'upper size']\n\n return pd.DataFrame(prop_list, columns=cols, index=sample_list)", "def pivot(self):\n\t\tpivoted = Dataset()\n\t\tfor (index, header) in enumerate(self.headers):\n\t\t\tfor row in self.rows:\n\t\t\t\tif index == 0:\n\t\t\t\t\tpivoted.headers.append(row[header])\n\t\t\t\telse:\n\t\t\t\t\tif len(pivoted.rows) < index:\n\t\t\t\t\t\tpivoted.rows.extend([{} for x in range(index - len(pivoted.rows))])\n\t\t\t\t\tpivoted.rows[index - 
1][row[self.headers[0]]] = row[header]\n\t\treturn pivoted", "def tabulate(d, transpose=False, thousands=True, key_fun=None, sep=\",\", align=True):\n pairs = d.keys()\n rows, cols = zip(*pairs)\n if transpose:\n rows, cols = cols, rows\n\n rows = sorted(set(rows))\n cols = sorted(set(cols))\n header = [\"o\"] + list(cols)\n table = []\n for r in rows:\n combo = [(r, c) for c in cols]\n if transpose:\n combo = [(c, r) for (r, c) in combo]\n data = [d.get(x, \"n/a\") for x in combo]\n data = [\"{0:.1f}\".format(x) if isinstance(x, float) else x for x in data]\n if key_fun:\n data = [key_fun(x) for x in data]\n table.append([str(r)] + data)\n\n if not align:\n formatted = load_csv(header, table, sep=sep)\n return \"\\n\".join(formatted)\n\n return loadtable(header, table, thousands=thousands)", "def converttable(tablecode):\n table = etree.XML(tablecode)\n rows = iter(table)\n headers = [col.text for col in next(rows)]\n data = []\n for row in rows:\n values = [col.text for col in row]\n debugprint(dict(zip(headers, values)), \"RAW JSON\")\n data.append(dict(zip(headers, values)))\n return data", "def normalize_to_flat(classifier, df, col):\n name = str(classifier) + '_df'\n new_df= json_normalize(df.loc[classifier][col])\n new_df['classifier'] = [classifier]\n return new_df", "def flatten_data(self, data):\n for row in data:\n flat_row = {k: self._flatten_value(v) for k, v in row.items()}\n yield flat_row", "def generate_table(self, rows):\n ...", "def convert_quick_table(result):\n headline = result.split('\\n',1)[0]\n names, converters = MastCasJobs.get_converters(headline, delimiter=',')\n tab = ascii.read(MastCasJobs.replacenull(result,delimiter=','),\n guess=False,fast_reader=False,format='csv',\n names=names,converters=converters)\n return tab", "def tab_delim_table(self):\n self.generate()\n\n header = ' \\t '.join([r'{: ^7}'.format(col) for col in self.columns])\n lines = []\n for row in self.rows:\n bits = []\n for col in self.columns:\n if col in self.formatters:\n bits.append(self.formatters[col].format(row[col]))\n else:\n bits.append(self.formatters.get(col, '{: ^7}').format(row[col] if row[col] else ''))\n lines.append(' \\t '.join(bits))\n\n return \"{}\\n{}\".format(header, '\\n'.join(lines))", "def normalize_values(self, data):\n\n df = pd.DataFrame(data[1:], columns = data[0]).astype(str)\n\n df = df.replace(ami_md_constants.NAS)\n\n df = df.replace(ami_md_constants.REGEX_REPLACE_DICT, regex=True)\n df = df.replace(ami_md_constants.STRING_REPLACE_DICT)\n df['source.object.format_type'] = df['source.object.format'].map(ami_md_constants.FORMAT_TYPE)\n\n for key in ami_md_constants.MEASURE_UNIT_MAPS.keys():\n value_map = ami_md_constants.MEASURE_UNIT_MAPS[key]\n df = self.map_value(df,\n value_map['from_column'],\n value_map['to_column'],\n value_map['constant_value'],\n value_map['values_map_column'],\n value_map['values_map'])\n\n #force all the numerics back to numeric, and drop all empty columns\n df = df.apply(pd.to_numeric, errors='ignore').dropna(axis=1, how = \"all\")\n\n vals = df.values.tolist()\n cols = df.columns.tolist()\n vals.insert(0, cols)\n\n return vals", "def make_summary_tables( res ):\n\n # transform second table to csv and read this as a dataFrame\n result_fit_df = pd.read_csv(StringIO( res.tables[1].as_csv() ), sep=\",\",index_col=0)\n result_fit_df.columns = [i.strip() for i in result_fit_df.columns]\n result_fit_df.index = [i.strip() for i in result_fit_df.index]\n\n # first table is trickier because the data is spread on to columns, and there is 
title line\n L = res.tables[0].as_html().split('\\n')\n L.pop(1) # get rid of the title\n tmp = pd.read_html('\\n'.join(L) , header=None)[0] # read as a dataframe, but with 4 columns \n\n names = list(tmp[0]) + list(tmp[2])[:-2] # columns 0 and 2 are metric names\n values = list(tmp[1]) + list(tmp[3])[:-2] # columns 1 and 3 are the corresponding values\n # NB : I exclude the last 2 elements which are empty \n \n result_general_df = pd.DataFrame( {'Name': names , 'Value' : values}, index = names , columns=['Value'] )\n \n return result_general_df , result_fit_df", "def make_pivot_table(data, index_name, columns, values, afffunc):\n\n pvt = data.pivot_table(index=[index_name], columns=[columns], values=values, aggfunc=afffunc)\n return pvt", "def make_table_from_exploded_df(exploded_df: pd.DataFrame, row_heading_cols, column_heading_cols, dfs_dict=None,\n value_col: str = \"text\", concat_with: str = \" | \",\n convert_numeric_items=False, sort_headers=True) -> pd.DataFrame:\n for heading_col in (row_heading_cols + column_heading_cols):\n exploded_df[heading_col].fillna(\"\", inplace=True)\n table = exploded_df.pivot_table(index=row_heading_cols, columns=column_heading_cols, values=value_col,\n aggfunc=(lambda a: concat_with.join(a)))\n\n if type(table) == pd.Series:\n table = table.to_frame()\n row_nones = [None for _ in range((table.index.nlevels))]\n col_nones = [None for _ in range((table.columns.nlevels))]\n\n table = table.rename_axis(index=row_nones, columns=col_nones)\n\n if convert_numeric_items:\n num_rows, num_cols = _infer_numeric_rows_cols(exploded_df, row_heading_cols, column_heading_cols)\n table = convert_cols_to_numeric(table, num_cols, num_rows)\n\n if sort_headers and column_heading_cols != [\"column_index\"] and len(\n column_heading_cols) == 1 and dfs_dict is not None:\n col_headings = table.columns.to_list()\n cols = dfs_dict[\"col_headers\"][dfs_dict[\"col_headers\"][\"text\"].isin(col_headings)]\n cols = cols.sort_values(\"column_index_begin\")\n col_headings_sorted = cols[\"text\"].to_list()\n\n not_inc_headings = [x for x in col_headings if x not in col_headings_sorted]\n col_headings_sorted = col_headings_sorted + not_inc_headings\n table = table[col_headings_sorted]\n\n elif sort_headers and column_heading_cols != [\"column_index\"] and len(\n column_heading_cols) > 1 and dfs_dict is not None:\n headers = table.columns.to_frame()\n sorted_headers_arr = _order_multiindex(headers, column_heading_cols, 0, dfs_dict[\"col_headers\"])\n sorted_headers = [tuple(header) for header in sorted_headers_arr]\n not_inc_headings = [x for x in table.columns.to_list() if x not in sorted_headers]\n sorted_headers = sorted_headers + not_inc_headings\n table = table[sorted_headers]\n\n if sort_headers and row_heading_cols != [\"row_index\"] and len(row_heading_cols) == 1 and dfs_dict is not None:\n row_headings = table.index.to_list()\n rows = dfs_dict[\"row_headers\"][dfs_dict[\"row_headers\"][\"text\"].isin(row_headings)]\n rows = rows.sort_values(\"row_index_begin\")\n row_headings_sorted = rows[\"text\"].to_list()\n\n not_inc_headings = [x for x in row_headings if x not in row_headings_sorted]\n row_headings_sorted = row_headings_sorted + not_inc_headings\n\n table = table.reindex(row_headings_sorted)\n\n elif sort_headers and row_heading_cols != [\"column_index\"] and len(row_heading_cols) > 1 and dfs_dict is not None:\n headers = table.index.to_frame()\n sorted_headers_arr = _order_multiindex(headers, row_heading_cols, 0, dfs_dict[\"row_headers\"])\n sorted_headers = 
[tuple(header) for header in sorted_headers_arr]\n\n not_inc_headings = [x for x in table.index.to_list() if x not in sorted_headers]\n sorted_headers = sorted_headers + not_inc_headings\n table = table.reindex(sorted_headers)\n\n return table", "def _buildtable(self):\n\n tabrows = []\n\n for i, (expid, exfiles) in enumerate(self._exposure_files.items()):\n specflux_b, specflux_r, specflux_z = [], [], []\n tab = None\n\n if len(exfiles) == 0:\n continue\n\n print(expid)\n for exfile in exfiles:\n print(exfile)\n hdu = fits.open(exfile)\n\n # The following tables are present in the redux sframes and the\n # nightwatch qcframes.\n wave = hdu['WAVELENGTH'].data\n\n # However, in the nightwatch files the wavelength data are a\n # table of size nfiber x nwavelength.\n if self._filetype == 'nightwatch':\n if wave.ndim > 1:\n wave = wave[0]\n\n fluxhead = hdu['FLUX'].header\n fluxdata = hdu['FLUX'].data\n ivardata = hdu['IVAR'].data\n fibermap = hdu['FIBERMAP'].data\n exptime = fluxhead['EXPTIME']\n if not np.all(self._unditherfa['FIBER'] ==\n np.arange(len(self._unditherfa))):\n raise ValueError('weird fiberassign file format!')\n fibermap = self._unditherfa[fibermap['FIBER']]\n\n target_id = fibermap['TARGETID']\n target_ra = fibermap['TARGET_RA']\n target_dec = fibermap['TARGET_DEC']\n fiber = fibermap['FIBER']\n objtype = fibermap['OBJTYPE']\n flux_g = fibermap['FLUX_G']\n flux_r = fibermap['FLUX_R']\n flux_z = fibermap['FLUX_Z']\n x, y = [fibermap['FIBERASSIGN_{}'.format(val)] for val in ('X', 'Y')]\n\n camera = fluxhead['CAMERA'][0].upper()\n\n if getattr(self, '_deltara', None) is not None:\n dra = self._deltara[i]*np.ones(len(fiber))\n ddec = self._deltadec[i]*np.ones(len(fiber))\n elif self._dithertype == 'telescope':\n dithra = self._ditherfa['target_ra']\n dithdec = self._ditherfa['target_dec']\n udithra = self._unditherfa['target_ra']\n udithdec = self._unditherfa['target_dec']\n ontarget = ((self._ditherfa['targetid'] ==\n self._unditherfa['targetid']) &\n (self._ditherfa['objtype'] == 'TGT'))\n dfiberra = (dithra-udithra)*np.cos(np.radians(udithdec))*60*60\n dfiberdec = (dithdec-udithdec)*60*60\n if not np.all(self._ditherfa['FIBER'] ==\n np.arange(len(self._ditherfa))):\n raise ValueError('unexpected shape of dither file')\n dfiberra[~ontarget] = np.nan\n dfiberdec[~ontarget] = np.nan\n dfiberra = dfiberra[fiber]\n dfiberdec = dfiberdec[fiber]\n wcs = self.lookup_wcs(fluxhead['MJD-OBS'])\n centralwcs = self._central_wcs\n if (~np.isfinite(centralwcs['cenra'][1]) or\n ~np.isfinite(centralwcs['cendec'][1])):\n raise ValueError('central pointing ra/dec is NaN!')\n dtelra = (wcs['cenra'][1]-centralwcs['cenra'][1])\n dtelra *= np.cos(np.radians(centralwcs['cendec'][1]))\n dteldec = wcs['cendec'][1]-centralwcs['cendec'][1]\n dra = dfiberra + dtelra*60*60\n ddec = dfiberdec + dteldec*60*60\n if np.all(~np.isfinite(dra)):\n print('warning: no good telescope offset for %s' %\n exfile)\n else:\n raise ValueError('not implemented')\n \n for j, fiber_id in enumerate(fiber):\n flux = fluxdata[j]\n ivar = ivardata[j]\n if not np.any(ivar > 0):\n specflux = 0\n specflux_ivar = 0\n else:\n meanivar = np.mean(ivar[ivar > 0])\n mask = ivar > meanivar / 100\n specflux = np.trapz(flux*mask, wave)\n specflux_ivar = 1./np.sum(ivar[mask]**-1)\n # Schlegel: sum over correct wavelengths, all three\n # filters, plus 11 pixel median filter to reject\n # cosmics.\n # will require being better about reading in\n # the spectrographs together.\n tabrows.append((expid, exptime,\n target_id[j], target_ra[j], 
target_dec[j],\n fiber[j], objtype[j],\n flux_g[j], flux_r[j], flux_z[j],\n specflux, specflux_ivar, camera,\n dra[j], ddec[j],\n x[j], y[j]))\n\n tab = Table(rows=tabrows,\n names=('EXPID', 'EXPTIME',\n 'TARGETID', 'TARGET_RA', 'TARGET_DEC',\n 'FIBER', 'OBJTYPE',\n 'FLUX_G', 'FLUX_R', 'FLUX_Z',\n 'SPECTROFLUX', 'SPECTROFLUX_IVAR', 'CAMERA',\n 'DELTA_X_ARCSEC', 'DELTA_Y_ARCSEC',\n 'XFOCAL', 'YFOCAL'),\n meta={'EXTNAME' : 'DITHER',\n 'TILEID' : '{}'.format(self._tileid)})\n\n return tab", "def reformat_attrTable(\n self):\n #format into a dictionary of rows for quick aligning with the tracking_id\n if self.attrTable: attrTable = self.attrTable[:];\n else: attrTable = [];\n\n attrTable_dict = {};\n for row in attrTable:\n attrTable_dict[row['tracking_id']] = row;\n return attrTable_dict;", "def join_gene_tables(gene_tables,output,verbose=None):\n \n gene_table_data={}\n start_column_id=\"\"\n samples=[]\n file_basenames=[]\n index=0\n for gene_table in gene_tables:\n \n if verbose:\n print(\"Reading file: \" + gene_table)\n \n lines=util.process_gene_table_with_header(gene_table, allow_for_missing_header=True)\n header=next(lines)\n \n # get the basename of the file\n file_basename='.'.join(os.path.basename(gene_table).split('.')[:-1])\n file_basenames.append(file_basename)\n \n if header:\n header_info=header.split(GENE_TABLE_DELIMITER)\n if not start_column_id:\n start_column_id=header_info[0]\n # allow for multiple samples\n sample_names=header_info[1:]\n else:\n # if there is no header in the file then use the file name as the sample name\n sample_names=[file_basename]\n \n for line in lines:\n data=line.split(GENE_TABLE_DELIMITER)\n try:\n gene=data[0]\n # if the header names multiple samples, merge all samples\n # this prevents extra columns from being included in some rows\n # this requires files containing multiple samples to include a header\n data_points=data[1:len(sample_names)+1]\n except IndexError:\n gene=\"\"\n\n if gene:\n current_data=gene_table_data.get(gene,\"\")\n fill = index - current_data.count(GENE_TABLE_DELIMITER)\n if fill > 0:\n # fill in zeros for samples without data then add data point\n gene_table_data[gene]=current_data + GENE_TABLE_DELIMITER.join([\"0\"]*fill) + GENE_TABLE_DELIMITER + GENE_TABLE_DELIMITER.join(data_points) + GENE_TABLE_DELIMITER\n elif fill < 0:\n # add data point to other data point from the same sample\n current_data_points=current_data.split(GENE_TABLE_DELIMITER)\n for i,point in enumerate(data_points):\n store_index=len(data_points)*-1-1+i\n current_data_points[store_index]=str(float(current_data_points[store_index])+float(point))\n gene_table_data[gene] = GENE_TABLE_DELIMITER.join(current_data_points)\n else:\n # add data point to end of list\n gene_table_data[gene] = current_data + GENE_TABLE_DELIMITER.join(data_points) + GENE_TABLE_DELIMITER\n \n samples+=sample_names\n index+=len(sample_names)\n # if all of the header names for the files are the same\n # then use the file names as headers\n if samples.count(samples[0]) == len(samples):\n samples=file_basenames\n \n # write the joined gene table\n if not start_column_id:\n start_column_id=\"# header \"\n sample_header=[start_column_id]+samples\n total_gene_tables=len(samples)\n sorted_gene_list=util.fsort(list(gene_table_data))\n try:\n file_handle=open(output,\"w\")\n file_handle.write(GENE_TABLE_DELIMITER.join(sample_header)+\"\\n\")\n except EnvironmentError:\n sys.exit(\"Unable to write file: \" + output) \n \n for gene in sorted_gene_list:\n # extend gene data for any gene that is 
not included in all samples\n current_data=gene_table_data[gene]\n fill = total_gene_tables - current_data.count(GENE_TABLE_DELIMITER)\n if fill:\n current_data=current_data + GENE_TABLE_DELIMITER.join([\"0\"]*fill) + GENE_TABLE_DELIMITER\n file_handle.write(gene+GENE_TABLE_DELIMITER+current_data.rstrip(GENE_TABLE_DELIMITER)+\"\\n\")\n \n file_handle.close()", "def getAllSampleFields(self, sample_id, study_id):\n sample_tables = []\n sample_tables.append('sample')\n sample_tables.append('common_fields')\n sample_tables.append('extra_sample_')\n sample_tables.append('air')\n sample_tables.append('other_environment')\n sample_tables.append('sediment')\n sample_tables.append('soil')\n sample_tables.append('wastewater_sludge')\n sample_tables.append('water')\n sample_tables.append('host_assoc_vertibrate')\n sample_tables.append('host_associated_plant')\n sample_tables.append('human_associated')\n sample_tables.append('host_sample')\n sample_tables.append('host')\n \n filled_fields = {}\n \n con = self.getMetadataDatabaseConnection()\n cursor = con.cursor()\n \n for table in sample_tables:\n if 'extra_sample_' in table:\n statement = 'select * from %s%s where sample_id = %s' % (table, study_id, sample_id)\n elif table == 'host':\n statement = 'select * from host h inner join host_sample hs on h.host_id = hs.host_id where sample_id = %s' % sample_id\n else:\n statement = 'select * from %s where sample_id = %s' % (table, sample_id)\n \n try:\n cursor.execute(statement)\n except Exception, e:\n print str(e)\n print 'Error running query:\\n'\n print statement\n print 'Running next query...\\n'\n \n continue\n \n data = cursor.fetchall()\n\n # Get the column names\n col_names = []\n for i in range(0, len(cursor.description)):\n col_names.append(cursor.description[i][0])\n \n # Find the rows with data\n for row in data:\n i = 0\n for field in row:\n if field != None and field != '':\n filled_fields[col_names[i]] = field\n i += 1\n \n return filled_fields", "def columnar(table: list[dict[str, str]]) -> dict[str, list[str]]:\n result: dict[str, list[str]] = {}\n keys = table[0].keys()\n for key in keys:\n result[key] = column_values(table, key)\n return result", "def gen_tab(cat):\n\n col = ['FLUX_APER2','FLUX_APER4','FLUX_APER5','FLUX_APER8','FLUX_APER10','FLUX_APER14',\n 'MAG_APER2','MAG_APER4','MAG_APER5','MAG_APER8','MAG_APER10','MAG_APER14',\n 'MAG_AUTO','MAG_PETRO','KRON_RADIUS',\n 'PETRO_RADIUS','FLUX_MAX','ISOAREAF_IMAGE','x',\n 'y','ra','dec','X2_IMAGE','Y2_IMAGE','XY_IMAGE',\n 'THETA_IMAGE','X2WIN_IMAGE','Y2WIN_IMAGE','XYWIN_IMAGE','AWIN_IMAGE','BWIN_IMAGE',\n 'THETAWIN_IMAGE','AWIN_WORLD','BWIN_WORLD','THETAWIN_WORLD',\n 'MU_MAX','FLAGS','FWHM_IMAGE','ELONGATION','SEX_CLASS','FLUX_RADIUS25',\n 'FLUX_RADIUS50','FLUX_RADIUS85','FLUX_RADIUS95','FLUX_RADIUS99']\n print('generating features table: {}'.format(cat))\n tab = pd.read_table(cat,skiprows=41,sep=r'\\s+',header=None, names=col)\n\n # crop the image for just using the central part of the image\n tab = crop(tab)\n\n # add concentration column by subtracting mag10 by mag5, rejecting the detections with negative concentration\n tab['CONCENT'] = tab.MAG_APER5 - tab.MAG_APER10\n tab = tab[tab.CONCENT > 0]\n\n # normalizing the columns\n print('normalizing features...')\n seesq_norm = ['X2_IMAGE','Y2_IMAGE','X2WIN_IMAGE',\n 'Y2WIN_IMAGE','XY_IMAGE','XYWIN_IMAGE',\n 'ISOAREAF_IMAGE']\n see_norm = ['AWIN_WORLD','AWIN_WORLD','FWHM_IMAGE',\n 'KRON_RADIUS','PETRO_RADIUS','FLUX_RADIUS25',\n 'FLUX_RADIUS50','FLUX_RADIUS85',\n 
'FLUX_RADIUS95','FLUX_RADIUS99']\n mag_norm = ['MAG_APER4','MAG_APER5','MAG_APER8',\n 'MAG_APER10','MAG_APER14','MAG_AUTO',\n 'MAG_PETRO','MU_MAX','CONCENT']\n flux_norm = ['FLUX_APER2','FLUX_APER4','FLUX_APER5',\n 'FLUX_APER8','FLUX_APER10','FLUX_APER14']\n fwhm_mean = tab.FWHM_IMAGE.mean()\n for seesq_col in seesq_norm:\n tab[seesq_col] = tab[seesq_col] / (fwhm_mean**2)\n for see_col in see_norm:\n tab[see_col] = tab[see_col] / fwhm_mean\n for mag_col in mag_norm:\n tab[mag_col] = tab[mag_col] * tab['MAG_APER2']\n for flux_col in flux_norm:\n tab[flux_col] = tab[flux_col] * tab['FLUX_MAX']\n tab['CONCENT'] = -1 * tab['CONCENT']\n\n # add column for galactic latitude\n print('calculating galactic latitude...')\n ra = np.array(tab['ra'].values)\n dec = np.array(tab['dec'].values)\n pos = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')\n tab['b'] = list(pos.galactic.b.deg)\n\n tab.drop(['MAG_APER2','FLUX_MAX','x','y'], axis=1, inplace=True)\n tab.to_csv(cat[:-4]+'.csv', index=False, header=True)", "def make_lof_table(data_table, my_genes, my_samples, summary_func):\n table_header = [\"Gene\"] + my_samples + [\n \"Missense:Benign\", \"Missense:Possibly\", \"Missense:Probably\",\n \"MissenseNA\", \"Indel\", \"Nonsense\", \"Frameshift\", \"Splice-site\",\n \"Synonymous\"]\n table_records = []\n\n gs_lookup = group_data_by_gs(data_table)\n for gene in my_genes:\n synonymous = missense_benign = missense_possibly = missense_probably = \\\n missense_na = frameshift = nonsense = splice = indel = 0\n\n out_row = [gene]\n for sample in my_samples:\n normalized = [0]\n # Count mutations of each type for this gene and sample\n for entry in gs_lookup[gene][sample]:\n if entry['muttype'] == 'Silent':\n synonymous += 1\n continue\n if entry['muttype'] == 'Intron':\n # Shouldn't be here; ignore\n continue\n\n if entry['muttype'] == 'Missense_Mutation':\n if entry['consequence'] == 'benign':\n missense_benign += 1\n elif entry['consequence'] == 'possibly':\n missense_possibly += 1\n elif entry['consequence'] == 'probably':\n missense_probably += 1\n elif entry['consequence'] == 'NA':\n missense_na += 1\n else:\n print(\"Unhandled missense consequence level:\",\n entry['consequence'], file=sys.stderr)\n elif entry['muttype'] == 'Nonsense_Mutation':\n nonsense += 1\n elif entry['muttype'] == 'Splice_Site':\n splice += 1\n elif entry['muttype'] in ('Frame_Shift_Ins', 'Frame_Shift_Del'):\n frameshift += 1\n elif entry['muttype'] in ('In_Frame_Ins', 'In_Frame_Del'):\n indel += 1\n else:\n print(\"Unhandled mutation type:\", entry['muttype'],\n file=sys.stderr)\n continue\n\n normalized.append(entry['normalized'])\n # Summarize the normalized mutation counts for this gene and sample\n out_row.append(summary_func(normalized))\n out_row.extend((missense_benign, missense_possibly, missense_probably,\n missense_na, indel, nonsense, frameshift, splice,\n synonymous))\n table_records.append(out_row)\n\n return pandas.DataFrame.from_records(table_records, columns=table_header)", "def construct_table(self):\n table_str = self.header_row\n row_lbls, col_lbls = self.get_idxvals()\n for r,rlbl in enumerate(row_lbls):\n row_data = [self.data[rlbl,clbl] for clbl in col_lbls]\n table_str += self.construct_row(r, row_data)\n \n return table_str", "def format_bgc_metadata(df,float_id):\n mdf = df[bgc_metadata_columns]\n bgc_metadata_dict = {}\n for col in list(mdf):\n bgc_metadata_dict[col] = list(pd.Series(mdf[col].unique()).astype(str).str.strip())\n bgc_metadata_dict[col] = 
list(pd.Series(mdf[col].unique()).astype(str).str.strip().replace(\"'\",'\"'))\n bgc_metadata_dict = json.dumps(bgc_metadata_dict) \n bgc_metadata_df = pd.DataFrame({\"float_id\": [float_id], \"Metadata_Dict\": [bgc_metadata_dict]})\n return bgc_metadata_df", "def parse_distmat_to_dict(table):\r\n\r\n col_headers, row_headers, data = parse_matrix(table)\r\n assert(col_headers == row_headers)\r\n\r\n result = defaultdict(dict)\r\n for (sample_id_x, row) in zip(col_headers, data):\r\n for (sample_id_y, value) in zip(row_headers, row):\r\n result[sample_id_x][sample_id_y] = value\r\n return result", "def df_sample_names(self):\n return self.abundance_mat_mult(True)", "def pivot_scenarios(df, prefix, scen_map, df_type=\"pop\"):\n df[\"scenario\"] = prefix + \"_\" + df_type + \"_\" +\\\n df[\"scenario\"].map(scen_map)\n df = df.pivot_table(values=[\"lower\", \"mean\", \"upper\" ,\"value\"],\n index=\"location_id\",\n columns=\"scenario\",\n aggfunc=\"first\").reset_index()\n # This flattens the column levels\n df.columns = ['_'.join(col) for col in df.columns.values if col]\n df.rename(columns={\"location_id_\":\"location_id\"}, inplace=True)\n\n return df", "def get_sample_colnames(ms_df: DF) -> List[str]:\n\n sample_numbers = get_sample_numbers(ms_df)\n\n target_sample_cols = list()\n for sample in sample_numbers:\n for col in SAMPLE_COLS:\n target_sample_cols.append('{attr}_{sample}'.format(attr=col, sample=sample))\n return target_sample_cols", "def pretty(self):\n #table = [\"\".join([\"%8s \" % s for s in self.alpha.getSymbols()])]\n table = []\n for row in PWM.getFreq(self):\n table.append(\"\".join([\"%8.6f \" % y for y in row]))\n return table", "def _dump_table(table: Model, directory: Path, format_: str):\n try:\n table.select().tuples()\n table.fields()\n dataset = tablib.Dataset(*table.select().tuples(), headers=table.fields())\n except:\n print(table._meta.database.get_columns(table.table_name()))\n\n if directory is not None:\n print(f\" Dumping {table.table_name()}...\")\n out_file = Path(directory) / f\"{table.table_name()}.{format_}\"\n out_file.write_text(dataset.export(format_))\n print(\" Done.\")\n print(\"=====================\")\n else:\n print(dataset.export(\"csv\"))", "def permute_table(dtable):\n shuffle_field(dtable, 'gene')\n shuffle_field(dtable, 'sample')\n shuffle_field(dtable, 'Normalized')\n if 'Filler' in dtable:\n del dtable['Filler']", "def experiment_list_table_format(result):\n table = []\n for item in result:\n table.append(experiment_show_table_format(item))\n return table", "def get_split_summary_table(all_df, train_df, test_df):\n table = PrettyTable()\n table.field_names = ['set', 'N total', 'N non-ICH', 'N ICH', 'frac non-ICH', 'frac ICH']\n for df, name in zip([all_df, train_df, test_df],['All', 'Train', 'Test']):\n table.add_row([name, len(df), len(df[df.Hemorrhage == 0]), len(df[df.Hemorrhage == 1]),\n f'{len(df[df.Hemorrhage == 0])/len(df):.3%}', f'{len(df[df.Hemorrhage == 1])/len(df):.3%}'])\n return table", "def convert_data(test_data,params,list_dict,rational_present=True,topk=2):\n \"\"\"input: params -- input dict, list_dict -- previous predictions containing rationals\n rational_present -- whether to keep rational only or remove them only\n topk -- how many words to select\"\"\"\n \n temp_dict={}\n for ele in list_dict:\n temp_dict[ele['annotation_id']]=ele['rationales'][0]['soft_rationale_predictions']\n \n test_data_modified=[]\n \n for index,row in tqdm(test_data.iterrows(),total=len(test_data)):\n try:\n 
attention=temp_dict[row['Post_id']]\n except KeyError:\n continue\n topk_indices = sorted(range(len(attention)), key=lambda i: attention[i])[-topk:]\n new_text =[]\n new_attention =[]\n if(rational_present):\n if(params['bert_tokens']):\n new_attention =[0]\n new_text = [101]\n for i in range(len(row['Text'])):\n if(i in topk_indices):\n new_text.append(row['Text'][i])\n new_attention.append(row['Attention'][i])\n if(params['bert_tokens']):\n new_attention.append(0)\n new_text.append(102)\n else:\n for i in range(len(row['Text'])):\n if(i not in topk_indices):\n new_text.append(row['Text'][i])\n new_attention.append(row['Attention'][i])\n test_data_modified.append([row['Post_id'],new_text,new_attention,row['Label']])\n\n df=pd.DataFrame(test_data_modified,columns=test_data.columns)\n return df", "def export_data(fp, app_name):\n from otree.views.admin import get_display_table_rows\n colnames, rows = get_display_table_rows(\n app_name, for_export=True, subsession_pk=None)\n colnames = ['{}.{}'.format(k, v) for k, v in colnames]\n writer = csv.writer(fp)\n writer.writerows([colnames])\n writer.writerows(rows)", "def print_table(source, count=False):\n table_value = []\n table_header = []\n for source_key, source_value in source.items():\n for item in source_value:\n table_value.append([v for v in item.values()])\n table_header.append([k for k in item.keys()])\n if not count:\n print(tabulate(table_value,\n headers=table_header[0],\n tablefmt='orgtbl'))\n else:\n print(tabulate([[len(source_value)]],\n headers=[source_key],\n tablefmt='orgtbl'))", "def read_table(table_name, hf, df_fmt, ilo, ihi):\n dfs = []\n for dt, block in df_fmt.groupby(\"dtype\"):\n \n # check if this dtype contains waveform data\n if 'waveform' in block['name'].values:\n wf_group = f\"/{table_name}/waveform\"\n wf_block = read_waveforms(wf_group, hf, df_fmt, ilo, ihi)\n wf_rows, wf_cols = wf_block.shape\n nrows = wf_rows\n \n # get number of additional columns\n new_cols = [c for c in list(block[\"name\"].values) if c != 'waveform']\n newcols = len(new_cols)\n \n # allocate the full numpy array for this dtype\n np_block = np.empty((nrows, newcols + wf_cols), dtype=dt)\n np_block[:, newcols:] = wf_block\n \n cols = []\n for i, col in enumerate(new_cols):\n ds = hf[f\"{table_name}/{col}\"] \n \n if ihi is None:\n ihi = ds.shape[0]\n nwfs = ihi - ilo + 1 # inclusive\n \n np_block[:, i] = ds[ilo:ihi]\n cols.append(col)\n cols.extend(np.arange(wf_cols)) \n\n dfs.append(pd.DataFrame(np_block, columns=cols))\n \n # read normal 'array<1>{real}' columns\n else:\n ncols = len(block)\n nrows = block[\"size\"].unique()\n if len(nrows) > 1:\n print('Error, columns are different lengths')\n exit()\n nrows = nrows[0]\n np_block = np.empty((nrows, ncols), dtype=dt)\n \n for i, col in enumerate(block[\"name\"]):\n ds = hf[f\"{table_name}/{col}\"]\n np_block[:,i] = ds[...]\n \n dfs.append(pd.DataFrame(np_block, columns=block[\"name\"])) \n \n # concat final DF after grouping dtypes and avoiding copies\n return pd.concat(dfs, axis=1, copy=False)", "def change_food_names_in_final_table(df):\n # df = pd.read_excel(\"final_dataset_with_median_all.xlsx\")\n df.reset_index(drop=True, inplace=True)\n\n foods = df['food_names']\n food_examples = []\n indices = list(range(0, len(foods)))\n for i in indices:\n print(i)\n food_examples.append(str(foods[i]) + str(i))\n food_examples = pd.Series(food_examples)\n\n df.drop(labels=['food_names'], axis=\"columns\", inplace=True)\n df['food_names'] = food_examples\n writer = 
pd.ExcelWriter('final_dataset_with_median.xlsx', engine='xlsxwriter')\n df.to_excel(writer, sheet_name='Sheet1')\n writer.save()", "def _gen_table_rows(self):\n row_labels = self._get_padded_row_labels()\n column_labels = self._get_padded_column_labels()\n for row in zip(*column_labels):\n yield ''.join('<td>%s</td>' % c for c in row)\n for label, row_string in zip(row_labels, HeatMap._gen_table_rows(self)):\n yield ''.join('<td>%s</td>' % c for c in label) + row_string", "def basic_table_details():\n tbl: pa.table = pa.Table.from_pylist([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ])\n\n results = {\n 'column_names': tbl.column_names,\n 'columns > map > combine_chunks > to_pylist': [col.combine_chunks().to_pylist() for col in tbl.columns],\n 'nbytes': tbl.nbytes,\n 'num_columns': tbl.num_columns,\n 'num_rows': tbl.num_rows,\n 'schema': tbl.schema,\n 'shape': tbl.shape,\n }\n\n print(results)", "def tranform_data(args):\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen \n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == \"pkl\":\n with open(Path(args.data_dir, \"fx_labels\"), \"rb\") as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range (len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:(maxsize)]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))", "def get_table_data(data):\n logger.info('Generating summary table')\n\n table_dict = data.T.to_dict()\n table = []\n for date in sorted(data.index):\n table.append(table_dict[date])\n table.sort(key=lambda x: x['DATE'], reverse=True)\n for ii in range(len(table)):\n for k, v in table[ii].items():\n if pd.isnull(v):\n table[ii][k] = None\n return table", "def create_table(build_list):\n # extract list of files:\n all_files = build_list.split(',')\n\n output_file = os.getcwd() + \"/fpkm_table.tbl\"\n verbalise(\"B\", \"saving built table to file \", output_file)\n\n awk_cmd = \"\"\"awk '!($10~/FPKM/){\\\n gene_sample[$1,FILENAME]=$9;\\\n samples[FILENAME]=1;genes[$1]=1}\\\n END{printf \"%s\\t\", \"genes\";\\\n for(g in genes){printf \"%s\\t\", g};print \"\";\\\n for(s in samples){printf \"%s\\t\",s;\\\n for(g in genes){printf \"%s\\t\", gene_sample[g,s]};\\\n print \"\"}}' \"\"\"\n\n full_cmd = awk_cmd + \" \".join(all_files) + \">\" + str(output_file)\n\n # build table:\n os.system(full_cmd)\n\n # shorten filenames within table for easier reading!\n #output\n\n return output_file", "def tabular_data(self):\n path = CFG.GRAPHS_DIR\n chdir(path)\n\n if self.experiment_count == 1:\n f = open(self.tablefile, 'w')\n f.write(self.print_border_line())\n f.write(self.table_header())\n f.write(self.print_border_line())\n f.write(self.pretty_string(\"Functions\"))\n f.write(self.pretty_string(\"Batch Size\"))\n f.write(self.pretty_string(\"Training (%)\"))\n f.write(self.pretty_string(\"Testing (%)\", True))\n f.write('\\n')\n f.write(self.print_border_line())\n f.close()\n\n f = open(self.tablefile, 'a')\n 
f.write(self.pretty_string(self.function_name))\n f.write(self.pretty_string(str(self.batch_size)))\n f.write(self.pretty_string(self.tr_mean_str))\n f.write(self.pretty_string(self.test_mean_str, True))\n f.write('\\n')\n f.close()", "def parse_trflp(lines):\r\n\r\n sample_ids = []\r\n otu_ids = []\r\n data = []\r\n non_alphanum_mask = re.compile('[^\\w|^\\t]')\r\n # not sure why the above regex doesn't cover the following regex...\r\n dash_space_mask = re.compile('[_ -]')\r\n\r\n for i, line in enumerate(lines):\r\n elements = line.strip('\\n').split('\\t')\r\n\r\n # special handling for the first line only\r\n if i == 0:\r\n # validating if the file has a header\r\n if elements[0] == '':\r\n for otu_id in elements[1:]:\r\n otu_ids.append(non_alphanum_mask.sub('_', otu_id))\r\n continue\r\n else:\r\n for j, otu_id in enumerate(elements[1:]):\r\n otu_ids.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))\r\n\r\n # handling of all other lines\r\n current_row = []\r\n\r\n # converting each value in the row to int\r\n for count in elements[1:]:\r\n try:\r\n current_row.append(int(round(float(count), 0)))\r\n except ValueError:\r\n current_row.append(0)\r\n\r\n # if the sum of all the values is equial to 0 ignore line\r\n if sum(current_row) == 0:\r\n continue\r\n\r\n # adding sample header to list\r\n sample_ids.append(non_alphanum_mask.sub('.',\r\n dash_space_mask.sub('.', elements[0])))\r\n\r\n # validating the size of the headers to add missing columns\r\n # this is only valid when there is no header\r\n if len(current_row) > len(otu_ids):\r\n # modify header data\r\n extra_cols = []\r\n for j in range(len(otu_ids), len(current_row)):\r\n extra_cols.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))\r\n # modify data\r\n for j in range(len(data)):\r\n data[j].extend([0] * (len(current_row) - len(otu_ids)))\r\n\r\n otu_ids.extend(extra_cols)\r\n elif len(current_row) < len(otu_ids):\r\n # modify data\r\n current_row.extend([0] * (len(otu_ids) - len(current_row)))\r\n\r\n data.append(current_row)\r\n\r\n return sample_ids, otu_ids, asarray(data).transpose()", "def data_in(input_filename, sample_id):\n \n print('Processing sample {}'.format(sample_id))\n # Encoding set to latin1 due to presence of degree symbol\n # newlines in CC2 logger details fields will cause issues\n header_rows = 30\n df_param = pd.read_table(input_filename, \n nrows=header_rows, \n encoding=\"latin1\", \n header=None)\n df_val = pd.read_table(input_filename, skiprows=header_rows)\n df_param_indexed = df_param.set_index(0)\n# redundant due to parsing sample id from filename\n# sample_id = df_param_indexed.loc['Sample ID', 1]\n \n d1 = {1: pd.Series(['', ''], index=['Sample ID', 'Label'])}\n df_val_params = pd.DataFrame(d1)\n df_val_params.loc['Sample ID', 1] = sample_id\n df_val_params.loc['Label', 1] = sample_id\n\n return df_param_indexed, df_val, df_val_params", "def convert_list_of_freqs_to_dataframe(referanse):\n res = []\n for x in referanse:\n res.append( dict(x))\n result = pd.DataFrame(res).transpose()\n normalize_corpus_dataframe(result)\n return result", "def get_sample_ids(maptable):\n return [line[0] for line in maptable[1:]]", "def generate_table(self, outtablename,\n cols=['A', 'B', 'AB'],\n generateTable=True):\n if generateTable:\n new_indices = ['time (s)', 'mean counts']\n for idx in self.data[cols].describe().index[2:]:\n new_indices.append(idx)\n outTable = self.data[cols].describe()\\\n .set_index(pd.Index(new_indices))\n outTable.to_latex(\n self.tables_dir + outtablename + \".tex\", 
float_format=\"%d\")\n print(\"Outtable: \", outTable)", "def flatten_df(df):\n flat_array = df.values.flatten()\n flat_df = pd.DataFrame(flat_array)\n flat_df.columns = [\"loan\"]\n flat_df[\"row_no\"] = flat_df.reset_index().index\n flat_df = flat_df[[\"row_no\", \"loan\"]]\n flat_df.row_no = flat_df.row_no // 100\n return flat_df", "def format_predictions(self, predictions):\n output = predictions.copy()\n\n column_names = predictions.columns\n\n for col in column_names:\n if col not in {'forecast_time', 'identifier'}:\n output[col] = predictions[col]\n\n return output", "def columnar(list_table_rows: list[dict[str, str]]) -> dict[str, list[str]]:\n column_oriented_table: dict[str, list[str]] = {}\n first_row: dict[str, str] = list_table_rows[0]\n for column in first_row:\n column_oriented_table[column] = column_values(list_table_rows, column)\n return column_oriented_table", "def _normalize_dataset(self):\n\n new_data = []\n columns = ['user_id', 'movie_id', 'rating']\n for line in self.data_file['users']:\n movies_by_user = [\n {'user_id': line['user_id'], 'movie_id': movie_id, 'rating': 5}\n for movie_id in line['movies']\n ]\n new_data.extend(movies_by_user)\n return pd.DataFrame(new_data, columns=columns)", "def tabulate(self):\n for test_name, test in self.test_types.items():\n for ivs_name, ivs in self.ivs.items():\n if self.verbose:\n print(\"{0}: {1}\".format(ivs_name, test_name))\n tree = test(ivs)\n if not tree:\n continue\n score = tree.score(True)\n if self.verbose > 1:\n tree.print_structure()\n\n self.result_matrix['ivs name'][ivs_name][test_name] = score\n self.result_matrix['test type'][test_name][ivs_name] = score", "def summary_scores(fold_scores_list): \n d = {}\n nulls = {}\n for idx, score in enumerate(fold_scores_list):\n d.update({f\"{idx+1}0 samples:\": score[0].T})\n nulls.update({f\"{idx+1}0 samples:\": score[1].T})\n \n summary_scores = pd.concat(d.values(), axis=1, keys=d.keys()).T\n summary_nulls = pd.concat(nulls.values(), axis=1, keys=nulls.keys()).T\n return summary_scores, summary_nulls", "def to_table(self):\n table = Table()\n table['THETA_LO'] = Quantity([self.counts.offset[:-1]], unit=self.counts.offset.unit)\n table['THETA_HI'] = Quantity([self.counts.offset[1:]], unit=self.counts.offset.unit)\n table['ENERG_LO'] = Quantity([self.counts.energy[:-1]], unit=self.counts.energy.unit)\n table['ENERG_HI'] = Quantity([self.counts.energy[1:]], unit=self.counts.energy.unit)\n table['counts'] = self.counts.to_table()['data']\n if self.counts.data_err is not None:\n table['counts_err'] = self.counts.to_table()['data_err']\n table['livetime'] = self.livetime.to_table()['data']\n table['bkg'] = self.bg_rate.to_table()['data']\n if self.bg_rate.data_err is not None:\n table['bkg_err'] = self.bg_rate.to_table()['data_err']\n table.meta['HDUNAME'] = \"bkg_2d\"\n return table", "def flatten(tbl, tpcol, key, val):\n tblflat = tbl[tbl[tpcol].isin(val)]\n tblflatnm = '%s_flat' %key\n return tblflat, tblflatnm", "def filter_mapping_file(map_data, map_header, good_sample_ids,\r\n include_repeat_cols=False, column_rename_ids=None):\r\n # keeping samples\r\n to_keep = []\r\n to_keep.extend([i for i in map_data if i[0] in good_sample_ids])\r\n\r\n # keeping columns\r\n headers = []\r\n to_keep = zip(*to_keep)\r\n headers.append(map_header[0])\r\n result = [to_keep[0]]\r\n\r\n if column_rename_ids:\r\n # reduce in 1 as we are not using the first colum (SampleID)\r\n column_rename_ids = column_rename_ids - 1\r\n for i, l in enumerate(to_keep[1:-1]):\r\n if i == 
column_rename_ids:\r\n if len(set(l)) != len(result[0]):\r\n raise ValueError(\r\n \"The column to rename the samples is not unique.\")\r\n result.append(result[0])\r\n result[0] = l\r\n headers.append('SampleID_was_' + map_header[i + 1])\r\n elif include_repeat_cols or len(set(l)) > 1:\r\n headers.append(map_header[i + 1])\r\n result.append(l)\r\n else:\r\n for i, l in enumerate(to_keep[1:-1]):\r\n if include_repeat_cols or len(set(l)) > 1:\r\n headers.append(map_header[i + 1])\r\n result.append(l)\r\n headers.append(map_header[-1])\r\n result.append(to_keep[-1])\r\n\r\n result = map(list, zip(*result))\r\n\r\n return headers, result", "def convert_table(mkd):\n\t\n\tmd_table_codes = re.findall(r\".*\\|.*\\n.*\\-.*(?:\\n.*\\|.*)*\", mkd, re.M)\n\tfor md_code in md_table_codes:\n\t\t\n\t\tmd_rows = re.findall(r\"(.*\\|.*)\", md_code, re.M)\n\t\theader = md_rows.pop(0)\n\t\tcolumn_count = md_rows.pop(0).count(\"-\")\n\n\t\ttex_code = \"\\\\begin{tabular}{|\"+\"l|\"*column_count+\"}\\n\\hline\\n\"\n\t\ttex_code += header.strip(\" |\").replace(\"|\", \"&\")+\" \\\\\\\\\\n\\hline\\n\"\n\t\tfor row in md_rows:\n\t\t\ttex_code += row.strip(\" |\").replace(\"|\", \"&\")+\" \\\\\\\\\\n\"\n\t\ttex_code += \"\\hline\\n\\end{tabular}\"\n\n\t\tmkd = mkd.replace(md_code, tex_code)\n\n\treturn mkd", "def normalize_dataset(self):", "def revise_report_df(report_df):\n # quick check to see whether report_df column structure is as expected\n if report_df.columns.tolist() != ['0', '1', 'accuracy', 'macro avg', 'weighted avg']:\n print(\"Warning: Column names aren't as expected. Verify report_df output_dict is correct.\")\n\n report_df.columns = ['0', '1', 'accuracy', 'Macro Avg', 'Micro Avg' ]\n\n dict_columns = ['0', '1', 'Macro Avg', 'Micro Avg']\n keys = ['precision', 'recall', 'f1-score', 'support']\n\n for col in dict_columns:\n # revise key values to personalize to its associated column i.e. 
from 'precision' to 'precision_0'\n report_df[col] = report_df[col].apply(lambda x: customize_dict_keys(x, col, keys))\n\n # iterate row wise through dataframe to normalize dictionary values into flat tables\n new_dict = {str(classifier) + '_df': normalize_to_flat(classifier, report_df, col) for classifier in report_df.index.values.tolist()}\n\n # concat all classifier flat tables into one dataframe\n dict_df = pd.concat(list(new_dict.values())).reset_index().drop(columns=['index'], axis=1)\n\n # merge on existing report_df dataframe index and on dict_df 'classifier' column value\n report_df = report_df.merge(dict_df, how='left', left_index=True, left_on=None, right_on='classifier').set_index('classifier')\n\n # select only created columns\n report_df = report_df.iloc[:,5:]\n # sort columns and filter out 'support' related columns\n report_df = report_df[sorted([col for col in report_df.columns if 'support' not in col])]\n\n return report_df", "def print_table2(df, eval_dir):\n\n out_file = os.path.join(eval_dir, 'table2.txt')\n\n\n with open(out_file, \"w\") as text_file:\n\n for idx, struc_name in enumerate(['LV', 'RV', 'Myo']):\n # new line\n header_string = ' & '\n line_string = '({}) '.format(struc_name)\n\n for p_idx, phase in enumerate(['ED', 'ES']):\n for measure in ['dice', 'assd', 'hd']:\n\n header_string += ' & {} ({}) '.format(phase, measure)\n\n dat = df.loc[(df['phase'] == phase) & (df['struc'] == struc_name)]\n\n if measure == 'dice':\n\n line_string += ' & {:.3f}\\,({:.3f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n else:\n line_string += ' & {:.2f}\\,({:.2f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n\n if p_idx == 0:\n header_string += ' & '\n line_string += ' & '\n\n header_string += ' \\\\\\\\ \\n'\n line_string += ' \\\\\\\\ \\n'\n\n if idx == 0:\n text_file.write(header_string)\n\n text_file.write(line_string)\n\n return 0", "def __generate_table(package_response, primary_table_id):\n if isinstance(package_response, Package):\n primary_table = package_response.tables[primary_table_id]\n header_id = primary_table.definition.header_table_id\n header_table = package_response.tables[header_id]\n dimension_columns = list(filter(lambda column_obj: column_obj.is_dimension, primary_table.definition.columns))\n dimension_columns_count = len(dimension_columns)\n row_count = len(primary_table.data.rows)\n header_row_count = len(header_table.data.rows)\n\n headers = list(list())\n # Constructs the column headers by considering dimension columns and header rows\n for series_definition_column in header_table.definition.columns:\n header_row = list()\n for i in range(0, dimension_columns_count, 1):\n if dimension_columns[i].description is \"\":\n header_row.append(\" \")\n else:\n header_row.append(dimension_columns[i].description)\n\n for i in range(0, header_row_count, 1):\n header_row.append(str(_SeriesDataHelper.get_value_helper(header_table.data.columns[series_definition_column.id], series_definition_column.type, i, series_definition_column.format.null_format)))\n headers.append(header_row)\n\n data = list(list())\n # Constructs the column data\n for i in range(0, row_count, 1):\n data_row = list()\n for series_definition_column in primary_table.definition.columns:\n data_row.append(str(_SeriesDataHelper.get_value_helper(primary_table.data.columns[series_definition_column.id], series_definition_column.type, i, series_definition_column.format.null_format)))\n data.append(data_row)\n\n if len(header_table.definition.columns) > 1:\n data_frame = 
pd.DataFrame(data=data)\n data_frame.columns = pd.MultiIndex.from_arrays(headers)\n else:\n data_frame = pd.DataFrame(data=data, columns = headers[0])\n\n return data_frame\n\n else:\n ValueError(\"Response data passed should be of package type.\")" ]
[ "0.60229045", "0.5637049", "0.5637049", "0.53095335", "0.53014034", "0.52432746", "0.5227828", "0.51772594", "0.51730895", "0.51567525", "0.51420665", "0.51343006", "0.511792", "0.511474", "0.5104135", "0.5101256", "0.50933254", "0.5086502", "0.5055283", "0.5043328", "0.50231004", "0.50162965", "0.500538", "0.500419", "0.49976316", "0.49925217", "0.4971346", "0.49679187", "0.49610928", "0.4948993", "0.49481377", "0.49259666", "0.49022692", "0.48986858", "0.48923644", "0.48905692", "0.48836884", "0.48819518", "0.48752242", "0.48751757", "0.4848846", "0.4848253", "0.4837781", "0.48353788", "0.483378", "0.48268655", "0.48264754", "0.4818533", "0.4791805", "0.47870106", "0.47868672", "0.47828642", "0.47821832", "0.47803053", "0.47778654", "0.4775855", "0.47668943", "0.47658768", "0.47639257", "0.4759768", "0.47527832", "0.47493765", "0.47413456", "0.47404423", "0.4739769", "0.47287786", "0.4717945", "0.4713454", "0.47050968", "0.47026664", "0.4701778", "0.4699514", "0.46983793", "0.4691696", "0.46911815", "0.4687741", "0.4683702", "0.46740445", "0.4655381", "0.46490005", "0.46482182", "0.46474797", "0.4641092", "0.46394774", "0.46380594", "0.46298012", "0.46293324", "0.46255845", "0.4624531", "0.46211013", "0.46200767", "0.46178576", "0.4612732", "0.46009672", "0.45995468", "0.45978776", "0.45969847", "0.4594769", "0.45947105", "0.4587888" ]
0.5961496
1
reformat count or FPKM tables into flattened table of sample_names/values for rapid alignment of attr table with tracking_id
def reformat_countOrFPKMTable( self, countOrFPKMTable_I=None, analysis_id_I=None, sna2experimentID_I=None, sna2sns_I=None, count_or_FPKM = 'count'): #format into a dictionary of rows for quick aligning with the tracking_id countOrFPKMTable_flat = []; for row in countOrFPKMTable_I: for k,v in row.items(): if k=='tracking_id':continue; tmp = {}; tmp['analysis_id'] = analysis_id_I; tmp['tracking_id'] = row['tracking_id']; sample_name_lst = k.split('_'); sample_name_base = '_'.join(sample_name_lst[:-1]); sample_name_rep = eval(sample_name_lst[-1]); if sna2experimentID_I: experiment_id = sna2experimentID_I[sample_name_base]; else: experiment_id=None; tmp['experiment_id'] = experiment_id; if sna2sns_I: sample_name = sna2sns_I[sample_name_base][sample_name_rep]; else: sample_name=k; tmp['sample_name'] = sample_name; tmp['value'] = v; tmp['value_units'] = count_or_FPKM; tmp['used_'] = True; tmp['comment_'] = None; countOrFPKMTable_flat.append(tmp); return countOrFPKMTable_flat;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def alignAndReformat_countFPKMattrTables(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n #reformat\n countTable_flat = self.reformat_countTable(\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,);\n fpkmTable_flat = self.reformat_fpkmTable(\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,);\n attrTable_dict = self.reformat_attrTable();\n #align\n countAndFpkmTable_aligned = [];\n for row in countTable_flat[:]:\n row.update(attrTable_dict[row['tracking_id']]);\n countAndFpkmTable_aligned.append(row);\n for row in fpkmTable_flat[:]:\n row.update(attrTable_dict[row['tracking_id']]);\n countAndFpkmTable_aligned.append(row);\n return countAndFpkmTable_aligned;", "def reformat_countTable(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n if self.countTable: countTable = self.countTable[:];\n else: countTable = [];\n\n countTable_flat = self.reformat_countOrFPKMTable(\n countOrFPKMTable_I=countTable,\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,\n count_or_FPKM = 'count');\n return countTable_flat;", "def reformat_fpkmTable(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n if self.fpkmTable: fpkmTable = self.fpkmTable[:];\n else: fpkmTable = [];\n\n fpkmTable_flat = self.reformat_countOrFPKMTable(\n countOrFPKMTable_I=fpkmTable,\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,\n count_or_FPKM = 'fpkm');\n return fpkmTable_flat;", "def reformat_attrTable(\n self):\n #format into a dictionary of rows for quick aligning with the tracking_id\n if self.attrTable: attrTable = self.attrTable[:];\n else: attrTable = [];\n\n attrTable_dict = {};\n for row in attrTable:\n attrTable_dict[row['tracking_id']] = row;\n return attrTable_dict;", "def freq_table(a):\n Detail_freq = a.loc[:, (a.dtypes == object) | (a.dtypes == long) ].columns.get_values().tolist()\n print(Detail_freq)\n for freq in Detail_freq:\n df1 = pd.DataFrame(a[freq].value_counts(dropna=False).astype(float).map('{:20,.0f}'.format).sort_index()).rename(columns={freq:'Count'})\n df2 = pd.DataFrame(a[freq].value_counts(normalize = True, dropna=False).map('{:,.2%}'.format).sort_index()).rename(columns={freq:'Percentage'})\n df = pd.concat([df1, df2], axis = 1)\n print(df)", "def _finalize(self, sampled_data):\n final_data = dict()\n for table_name, table_rows in sampled_data.items():\n parents = self.metadata.get_parents(table_name)\n if parents:\n for parent_name in parents:\n foreign_key = self.metadata.get_foreign_key(parent_name, table_name)\n if foreign_key not in table_rows:\n parent_ids = self._find_parent_ids(table_name, parent_name, sampled_data)\n table_rows[foreign_key] = parent_ids\n\n reversed_data = self.metadata.reverse_transform(table_name, table_rows)\n\n fields = self.metadata.get_fields(table_name)\n\n final_data[table_name] = reversed_data[list(fields.keys())]\n\n return final_data", "def format_unifrac_sample_mapping(sample_ids, otu_ids, otu_table_array):\r\n out = []\r\n for i, row in enumerate(otu_table_array):\r\n for j, val in enumerate(row):\r\n if val > 0:\r\n line = [otu_ids[i], sample_ids[j], str(val)]\r\n out.append('\\t'.join(line))\r\n return out", "def per_sample_taxa_summaries(open_table, output_format):\n t = parse_biom_table(open_table)\n header = \"#taxon\\trelative_abundance\\n\"\n\n for v, id_, md in t.iter():\n with open(output_format % id_, 'w') as 
f:\n f.write(header)\n\n for sorted_v, taxa in \\\n sorted(zip(v, t.ids(axis='observation')))[::-1]:\n if sorted_v:\n f.write(\"%s\\t%f\\n\" % (taxa, sorted_v))", "def reformat_data(self, df, ids):\n data = np.zeros((len(ids), self.n_sample_rows + 1, self.n_features))\n idx = 0\n for i in ids:\n sample = df.loc[i]\n data[idx, 0:89, :] = sample.values\n data[idx, 89, :] = np.mean(sample.values)\n idx += 1\n return data", "def produce_mirna_single_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.single.bam.mirbase_counts.txt\")", "def make_summary_tables( res ):\n\n # transform second table to csv and read this as a dataFrame\n result_fit_df = pd.read_csv(StringIO( res.tables[1].as_csv() ), sep=\",\",index_col=0)\n result_fit_df.columns = [i.strip() for i in result_fit_df.columns]\n result_fit_df.index = [i.strip() for i in result_fit_df.index]\n\n # first table is trickier because the data is spread on to columns, and there is title line\n L = res.tables[0].as_html().split('\\n')\n L.pop(1) # get rid of the title\n tmp = pd.read_html('\\n'.join(L) , header=None)[0] # read as a dataframe, but with 4 columns \n\n names = list(tmp[0]) + list(tmp[2])[:-2] # columns 0 and 2 are metric names\n values = list(tmp[1]) + list(tmp[3])[:-2] # columns 1 and 3 are the corresponding values\n # NB : I exclude the last 2 elements which are empty \n \n result_general_df = pd.DataFrame( {'Name': names , 'Value' : values}, index = names , columns=['Value'] )\n \n return result_general_df , result_fit_df", "def normalize_counts_to_tpm(counts_dir, gff_dir, out_dir, feat='CDS', id_sym='gene_id='):\n count_files = [os.path.join(counts_dir, f) for f in os.listdir(counts_dir)]\n all_tpms = {}\n for cf in count_files:\n if \"_counts\" not in cf:\n continue\n tpm = normalize_counts_to_tpm_one_file(cf, gff_dir, feat, id_sym)\n #out_file = \"{}_tpm.csv\".format(os.path.basename(cf))\n #out_path = os.path.join(out_dir, out_file)\n #with open(out_path, \"w\") as fo:\n # for gene, t in tpm.items():\n # fo.write(\"{},{}\\n\".format(gene, t))\n prefix = os.path.basename(cf).split(\"_trimmed\")[0] # this would be specific to my naming convention\n all_tpms[prefix] = tpm\n return all_tpms", "def _create_counts(out_dts, out_dir):\n ma, ma_mirna = _merge(out_dts)\n out_ma = op.join(out_dir, \"counts.tsv\")\n out_ma_mirna = op.join(out_dir, \"counts_mirna.tsv\")\n ma.to_csv(out_ma, sep=\"\\t\")\n ma_mirna.to_csv(out_ma_mirna, sep=\"\\t\")\n return out_ma_mirna, out_ma", "def print_tables(hash_table, f_output, l_samples):\n\n l_fields = ['chr', 'pos', 'ref', 'alt', 'QUAL', 'FILTER',\n 'Func.refGene', 'Gene.refGene', 'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene',\n 'cytoBand', 'ExAC_ALL', 'ExAC_AFR', 'ExAC_AMR', 'ExAC_EAS', 'ExAC_FIN', 'ExAC_NFE', 'ExAC_OTH',\n 'ExAC_SAS',\n 'avsnp147', 'SIFT_score', 'SIFT_pred', 'Polyphen2_HDIV_score', 'Polyphen2_HDIV_pred',\n 'Polyphen2_HVAR_score',\n 'Polyphen2_HVAR_pred', 'LRT_score', 'LRT_pred', 'MutationTaster_score', 'MutationTaster_pred',\n 'MutationAssessor_score', 'MutationAssessor_pred', 'FATHMM_score', 'FATHMM_pred', 'PROVEAN_score',\n 'PROVEAN_pred', 'VEST3_score', 'CADD_raw', 'CADD_phred', 'DANN_score', 'fathmm-MKL_coding_score',\n 'fathmm-MKL_coding_pred', 'MetaSVM_score', 'MetaSVM_pred', 'MetaLR_score', 'MetaLR_pred',\n 'integrated_fitCons_score', 'integrated_confidence_value', 'GERP++_RS', 'phyloP7way_vertebrate',\n 'phyloP20way_mammalian', 'phastCons7way_vertebrate', 'phastCons20way_mammalian', 'SiPhy_29way_logOdds']\n 
l_fields = l_fields + l_samples\n \n l_chr = set([item[0] for item in hash_table.keys()])\n\n fo = open(f_output, 'w')\n fo.write(','.join(l_fields) + '\\n')\n for key in sorted(hash_table.keys(), key=itemgetter(1)):\n fo.write(','.join(map(lambda field: hash_table[key].get(field, '.'), l_fields)) + '\\n')\n fo.close()", "def get_split_summary_table(all_df, train_df, test_df):\n table = PrettyTable()\n table.field_names = ['set', 'N total', 'N non-ICH', 'N ICH', 'frac non-ICH', 'frac ICH']\n for df, name in zip([all_df, train_df, test_df],['All', 'Train', 'Test']):\n table.add_row([name, len(df), len(df[df.Hemorrhage == 0]), len(df[df.Hemorrhage == 1]),\n f'{len(df[df.Hemorrhage == 0])/len(df):.3%}', f'{len(df[df.Hemorrhage == 1])/len(df):.3%}'])\n return table", "def prepare_batch_sample_set_for_metadata_export(path, tsca_id):\n raw = pd.read_table(path)\n print( \"%d Samples in this batch\" % raw.shape[0] )\n\n # Create dfs to upload\n all_samples = pd.concat([pd.DataFrame(index=raw.index, columns=['membership:sample_set_id'], data=tsca_id), \\\n raw[ ['sample_id', 'sample_type'] ]], axis=1)\n\n\n tumors = all_samples.loc[ all_samples['sample_type'] == \"Tumor\", ['membership:sample_set_id', 'sample_id'] ]\n tumors.loc[: , 'membership:sample_set_id'] = \"%s_T\"%tsca_id\n \n normals = all_samples.loc[ all_samples['sample_type'] == \"Normal\", ['membership:sample_set_id', 'sample_id'] ]\n normals.loc[: , 'membership:sample_set_id'] = \"%s_N\"%tsca_id\n\n all_samples = all_samples.drop('sample_type', axis=1)\n return (all_samples, tumors, normals)", "def produce_mirna_allbest_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.bam.mirbase_counts.txt\")", "def write_count_table(filtered_counts, gene_names, ids_liver_header):\n with open(\"filtered_tcga_counts.tsv\", \"w\") as file:\n file.write(\"gene_id\\tgene_name\\t\" + \"\\t\".join(ids_liver_header) + \"\\n\")\n for gene_name, counts_line in zip(gene_names, filtered_counts):\n file.write(gene_name + \"\\t\" + \"\\t\" + \"\\t\".join(counts_line) + \"\\n\")", "def make_lof_table(data_table, my_genes, my_samples, summary_func):\n table_header = [\"Gene\"] + my_samples + [\n \"Missense:Benign\", \"Missense:Possibly\", \"Missense:Probably\",\n \"MissenseNA\", \"Indel\", \"Nonsense\", \"Frameshift\", \"Splice-site\",\n \"Synonymous\"]\n table_records = []\n\n gs_lookup = group_data_by_gs(data_table)\n for gene in my_genes:\n synonymous = missense_benign = missense_possibly = missense_probably = \\\n missense_na = frameshift = nonsense = splice = indel = 0\n\n out_row = [gene]\n for sample in my_samples:\n normalized = [0]\n # Count mutations of each type for this gene and sample\n for entry in gs_lookup[gene][sample]:\n if entry['muttype'] == 'Silent':\n synonymous += 1\n continue\n if entry['muttype'] == 'Intron':\n # Shouldn't be here; ignore\n continue\n\n if entry['muttype'] == 'Missense_Mutation':\n if entry['consequence'] == 'benign':\n missense_benign += 1\n elif entry['consequence'] == 'possibly':\n missense_possibly += 1\n elif entry['consequence'] == 'probably':\n missense_probably += 1\n elif entry['consequence'] == 'NA':\n missense_na += 1\n else:\n print(\"Unhandled missense consequence level:\",\n entry['consequence'], file=sys.stderr)\n elif entry['muttype'] == 'Nonsense_Mutation':\n nonsense += 1\n elif entry['muttype'] == 'Splice_Site':\n splice += 1\n elif entry['muttype'] in ('Frame_Shift_Ins', 'Frame_Shift_Del'):\n frameshift += 1\n elif entry['muttype'] in ('In_Frame_Ins', 
'In_Frame_Del'):\n indel += 1\n else:\n print(\"Unhandled mutation type:\", entry['muttype'],\n file=sys.stderr)\n continue\n\n normalized.append(entry['normalized'])\n # Summarize the normalized mutation counts for this gene and sample\n out_row.append(summary_func(normalized))\n out_row.extend((missense_benign, missense_possibly, missense_probably,\n missense_na, indel, nonsense, frameshift, splice,\n synonymous))\n table_records.append(out_row)\n\n return pandas.DataFrame.from_records(table_records, columns=table_header)", "def join_gene_tables(gene_tables,output,verbose=None):\n \n gene_table_data={}\n start_column_id=\"\"\n samples=[]\n file_basenames=[]\n index=0\n for gene_table in gene_tables:\n \n if verbose:\n print(\"Reading file: \" + gene_table)\n \n lines=util.process_gene_table_with_header(gene_table, allow_for_missing_header=True)\n header=next(lines)\n \n # get the basename of the file\n file_basename='.'.join(os.path.basename(gene_table).split('.')[:-1])\n file_basenames.append(file_basename)\n \n if header:\n header_info=header.split(GENE_TABLE_DELIMITER)\n if not start_column_id:\n start_column_id=header_info[0]\n # allow for multiple samples\n sample_names=header_info[1:]\n else:\n # if there is no header in the file then use the file name as the sample name\n sample_names=[file_basename]\n \n for line in lines:\n data=line.split(GENE_TABLE_DELIMITER)\n try:\n gene=data[0]\n # if the header names multiple samples, merge all samples\n # this prevents extra columns from being included in some rows\n # this requires files containing multiple samples to include a header\n data_points=data[1:len(sample_names)+1]\n except IndexError:\n gene=\"\"\n\n if gene:\n current_data=gene_table_data.get(gene,\"\")\n fill = index - current_data.count(GENE_TABLE_DELIMITER)\n if fill > 0:\n # fill in zeros for samples without data then add data point\n gene_table_data[gene]=current_data + GENE_TABLE_DELIMITER.join([\"0\"]*fill) + GENE_TABLE_DELIMITER + GENE_TABLE_DELIMITER.join(data_points) + GENE_TABLE_DELIMITER\n elif fill < 0:\n # add data point to other data point from the same sample\n current_data_points=current_data.split(GENE_TABLE_DELIMITER)\n for i,point in enumerate(data_points):\n store_index=len(data_points)*-1-1+i\n current_data_points[store_index]=str(float(current_data_points[store_index])+float(point))\n gene_table_data[gene] = GENE_TABLE_DELIMITER.join(current_data_points)\n else:\n # add data point to end of list\n gene_table_data[gene] = current_data + GENE_TABLE_DELIMITER.join(data_points) + GENE_TABLE_DELIMITER\n \n samples+=sample_names\n index+=len(sample_names)\n # if all of the header names for the files are the same\n # then use the file names as headers\n if samples.count(samples[0]) == len(samples):\n samples=file_basenames\n \n # write the joined gene table\n if not start_column_id:\n start_column_id=\"# header \"\n sample_header=[start_column_id]+samples\n total_gene_tables=len(samples)\n sorted_gene_list=util.fsort(list(gene_table_data))\n try:\n file_handle=open(output,\"w\")\n file_handle.write(GENE_TABLE_DELIMITER.join(sample_header)+\"\\n\")\n except EnvironmentError:\n sys.exit(\"Unable to write file: \" + output) \n \n for gene in sorted_gene_list:\n # extend gene data for any gene that is not included in all samples\n current_data=gene_table_data[gene]\n fill = total_gene_tables - current_data.count(GENE_TABLE_DELIMITER)\n if fill:\n current_data=current_data + GENE_TABLE_DELIMITER.join([\"0\"]*fill) + GENE_TABLE_DELIMITER\n 
file_handle.write(gene+GENE_TABLE_DELIMITER+current_data.rstrip(GENE_TABLE_DELIMITER)+\"\\n\")\n \n file_handle.close()", "def samples(app, args):\n engine = create_engine(args.datafile)\n meta = MetaData()\n meta.reflect(engine)\n print(\"\\t\".join([str(x).replace('counts.', '')\n for x in meta.tables['counts'].columns\n if not x == 'counts.index']))", "def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df", "def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df", "def tranform_data(args):\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen \n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == \"pkl\":\n with open(Path(args.data_dir, \"fx_labels\"), \"rb\") as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range (len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:(maxsize)]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))", "def preprocess(df):\n \n # drop the following columns - irrelevant now\n DROP_COLUMNS = ['id', 'original_title', 'release_date'\n , 'tmdbId', 'popularity', 'year']\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n \n # drop all of the language columns\n DROP_COLUMNS = [col for col in df.columns if col[:3]==\"lan\"]\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n\n # loop through the columns we want to aggregate\n for col_type in [\n \"original_language_\"\n , \"prod_comp_cntry_\"\n , \"prod_comp_names_\"\n , \"writers_\"\n , \"actors_\"\n , \"genres_\"\n , \"director_\"\n ]:\n # create a dictionary of each unique value and its frequency\n val_freq = {}\n for col in df.columns:\n if col.startswith(col_type):\n val_freq[col] = df[col].sum()\n\n # create a dataframe from this dictionary; sort by count\n counts = pd.DataFrame.from_dict(\n val_freq\n , orient='index'\n , columns=['count']\n ).sort_values('count', ascending=False)\n counts['frac'] = counts['count'].apply(lambda x: 100*x / df.shape[0])\n\n # handle special case of production company country\n if col_type == \"prod_comp_cntry_\":\n DROP_COLUMNS = [col for col in counts.index][3:]\n\n # handle special case of directors\n elif col_type == \"director_\":\n DIRECTOR_COLS = [col for col in df.columns\n if col.startswith(\"director_\")\n and col!=\"director_pop\"]\n df['established_director'] = df[DIRECTOR_COLS].max(axis=1)\n DROP_COLUMNS = DIRECTOR_COLS\n\n # handle special case of actors\n elif col_type == \"actors_\":\n ACTORS_COLS = [col for col in df.columns if \"actors\" in col]\n df['num_top_100_actors'] = df[ACTORS_COLS].sum(axis=1)\n DROP_COLUMNS = ACTORS_COLS\n\n # handle all the other cases\n else:\n DROP_COLUMNS = [col for col in counts.query('frac < 2').index]\n\n\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n \n ##########################################################################\n # adjust the data for inflation\n CPI_tf = df['CPIAUCSL'].max()\n df['budget'] = df[['budget', 
'CPIAUCSL']].apply(\n cpi_adjust\n , args=(CPI_tf ,)\n , axis=1\n )\n df['revenue'] = df[['revenue', 'CPIAUCSL']].apply(\n cpi_adjust\n , args=(CPI_tf ,)\n , axis=1\n )\n # no longer need CPI data\n df.drop('CPIAUCSL', axis=1, inplace=True)\n \n ########################################################################## \n # add in useful features about the cast and crew \n df['cast_crew_sum_pop'] = (\n df['director_pop']\n + df['avg_actor_pop']\n + df['avg_writer_pop']\n )\n df['cast_crew_product_pop'] = (\n df['director_pop']\n * df['avg_actor_pop']\n * df['avg_writer_pop']\n )\n df['runtime'].replace(to_replace=0, value=df['runtime'].median(), inplace=True)\n df = df.query('10000 <= revenue').copy()\n df = df.query('100000 <= budget').copy()\n df.drop('sum_actor_pop', axis=1, inplace=True)\n df.drop('min_writer_pop', axis=1, inplace=True)\n\n # code to transform columns\n for col in [\n \"budget\", \"director_pop\", \"avg_writer_pop\"\n , \"max_writer_pop\", \"avg_actor_pop\", \"max_actor_pop\"\n , \"min_actor_pop\", 'cast_crew_sum_pop'\n , 'cast_crew_product_pop'\n ]:\n df['log10_'+col] = df[col].apply(lambda x: math.log10(x))\n df.drop(col, axis=1, inplace=True)\n \n return df", "def gen_tab(cat):\n\n col = ['FLUX_APER2','FLUX_APER4','FLUX_APER5','FLUX_APER8','FLUX_APER10','FLUX_APER14',\n 'MAG_APER2','MAG_APER4','MAG_APER5','MAG_APER8','MAG_APER10','MAG_APER14',\n 'MAG_AUTO','MAG_PETRO','KRON_RADIUS',\n 'PETRO_RADIUS','FLUX_MAX','ISOAREAF_IMAGE','x',\n 'y','ra','dec','X2_IMAGE','Y2_IMAGE','XY_IMAGE',\n 'THETA_IMAGE','X2WIN_IMAGE','Y2WIN_IMAGE','XYWIN_IMAGE','AWIN_IMAGE','BWIN_IMAGE',\n 'THETAWIN_IMAGE','AWIN_WORLD','BWIN_WORLD','THETAWIN_WORLD',\n 'MU_MAX','FLAGS','FWHM_IMAGE','ELONGATION','SEX_CLASS','FLUX_RADIUS25',\n 'FLUX_RADIUS50','FLUX_RADIUS85','FLUX_RADIUS95','FLUX_RADIUS99']\n print('generating features table: {}'.format(cat))\n tab = pd.read_table(cat,skiprows=41,sep=r'\\s+',header=None, names=col)\n\n # crop the image for just using the central part of the image\n tab = crop(tab)\n\n # add concentration column by subtracting mag10 by mag5, rejecting the detections with negative concentration\n tab['CONCENT'] = tab.MAG_APER5 - tab.MAG_APER10\n tab = tab[tab.CONCENT > 0]\n\n # normalizing the columns\n print('normalizing features...')\n seesq_norm = ['X2_IMAGE','Y2_IMAGE','X2WIN_IMAGE',\n 'Y2WIN_IMAGE','XY_IMAGE','XYWIN_IMAGE',\n 'ISOAREAF_IMAGE']\n see_norm = ['AWIN_WORLD','AWIN_WORLD','FWHM_IMAGE',\n 'KRON_RADIUS','PETRO_RADIUS','FLUX_RADIUS25',\n 'FLUX_RADIUS50','FLUX_RADIUS85',\n 'FLUX_RADIUS95','FLUX_RADIUS99']\n mag_norm = ['MAG_APER4','MAG_APER5','MAG_APER8',\n 'MAG_APER10','MAG_APER14','MAG_AUTO',\n 'MAG_PETRO','MU_MAX','CONCENT']\n flux_norm = ['FLUX_APER2','FLUX_APER4','FLUX_APER5',\n 'FLUX_APER8','FLUX_APER10','FLUX_APER14']\n fwhm_mean = tab.FWHM_IMAGE.mean()\n for seesq_col in seesq_norm:\n tab[seesq_col] = tab[seesq_col] / (fwhm_mean**2)\n for see_col in see_norm:\n tab[see_col] = tab[see_col] / fwhm_mean\n for mag_col in mag_norm:\n tab[mag_col] = tab[mag_col] * tab['MAG_APER2']\n for flux_col in flux_norm:\n tab[flux_col] = tab[flux_col] * tab['FLUX_MAX']\n tab['CONCENT'] = -1 * tab['CONCENT']\n\n # add column for galactic latitude\n print('calculating galactic latitude...')\n ra = np.array(tab['ra'].values)\n dec = np.array(tab['dec'].values)\n pos = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')\n tab['b'] = list(pos.galactic.b.deg)\n\n tab.drop(['MAG_APER2','FLUX_MAX','x','y'], axis=1, inplace=True)\n tab.to_csv(cat[:-4]+'.csv', index=False, 
header=True)", "def build_table():\n with contextlib.ExitStack() as stack:\n files = [stack.enter_context(gzip.open(f, 'rt')) for f in sys.argv[1:]]\n iters = [(line.split() for line in f) for f in files]\n for it in iters:\n next(it)\n key = operator.itemgetter(0)\n table = []\n for k, g in itertools.groupby(merge(*iters, key=key), key=key):\n props = list(g)\n if len(props) == len(iters):\n table.append([k] + [x[1] for x in props])\n for snp in table:\n print(*snp)", "def print_table(source, count=False):\n table_value = []\n table_header = []\n for source_key, source_value in source.items():\n for item in source_value:\n table_value.append([v for v in item.values()])\n table_header.append([k for k in item.keys()])\n if not count:\n print(tabulate(table_value,\n headers=table_header[0],\n tablefmt='orgtbl'))\n else:\n print(tabulate([[len(source_value)]],\n headers=[source_key],\n tablefmt='orgtbl'))", "def make_table(ranked_means):\n fp = open(\"table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c|}\n \\\\hline\n \\\\multicolumn{2}{|c||}{Slowest} & \\\\multicolumn{2}{|c|}{Fastest} \\\\\\\\ \\\\hline\n Feature & Rate & Feature & Rate \\\\\\\\ \\\\hline\n\"\"\")\n top_10 = ranked_means[0:10]\n bottom_10 = ranked_means[-10:]\n for ((f_rate, f_name),(s_rate,s_name)) in zip(top_10, bottom_10):\n f_name = f_name.split(\":\")[-1]\n f_name = f_name.rsplit(\" \", 1)[0] if f_name.endswith(\"(V)\") else f_name\n s_name = s_name.split(\":\")[-1]\n s_name = s_name.rsplit(\" \", 1)[0] if s_name.endswith(\"(V)\") else s_name\n fp.write(\" %s & %.2f & %s & %.2f \\\\\\\\ \\n\" % \\\n (f_name, f_rate, s_name, s_rate))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()\n\n fp = open(\"supp_meaning_table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c||l|c||l|c|}\n \\\\hline\n Meaning & Category & Meaning & Category & Meaning & Category & Meaning & Category\\\\\\\\ \\\\hline\n\n\"\"\")\n feature_names = [f.split(\":\")[-1] for (r,f) in ranked_means]\n feature_names.sort(key=lambda s: s.lower())\n col1 = feature_names[0:25]\n col2 = feature_names[25:50]\n col3 = feature_names[50:75]\n col4 = feature_names[75:]\n for a,b,c,d in zip(col1,col2,col3,col4):\n x,y,z,w = [get_meaning_category(i) or \"Excluded\" for i in (a,b,c,d)]\n # Lop off (V)s (we needed them above for get_meaning_category to work)\n a,b,c,d = [f.rsplit(\" \", 1)[0] if f.endswith(\"(V)\") else f for f in (a,b,c,d)]\n fp.write(\"%s & %s & %s & %s & %s & %s & %s & %s\\\\\\\\ \\n\" % (a, x, b, y, c, z, d, w))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()", "def _transform_idoc(df):\n global _SIMPLECOUNT_COLUMNS\n\n try:\n df['comcnty'] = ((df['comcnty'] + 1) / 2).astype(int)\n df.columns = ['year', 'fk_simplecount_county'] + df.columns.tolist()[2:]\n\n indicator_list = [1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1620, 1621]\n \n c_nc = df['admtypo3'] == 1\n c_tv = df['admtypo3'] == 2\n c_pers = df['offtype2'] == 1 # df['offtype'] == 1\n c_prop = df['offtype2'] == 2 # df['offtype'] == 2\n c_sex = df['offtype2'] == 4 # df['offtype'] == 4\n c_drug = df['offtype2'].isin([3.1, 3.2, 3.3, 3.4, 3.5, 3.6]) # df['offtype'] == 3\n c_other = df['offtype2'].isin([0, 3, 5, 7]) # df['offtype'] == 7\n c_viol = df['offtype'] == 1\n c_male = df['sex'] == 'M'\n c_female = ~c_male\n\n c_first2 = [c_nc, c_tv]\n c_others = [c_pers, c_prop, c_sex, c_drug, c_other, c_viol, c_male, c_female]\n \n def helper(c, indicator_id, first2):\n df['fk_simplecount_indicator'] = indicator_id\n g 
= ['fk_simplecount_indicator', 'year', 'fk_simplecount_county']\n if first2:\n return df[c].groupby(g).size().reset_index(name='value')\n else:\n return df[c_nc & c].groupby(g).size().reset_index(name='value')\n\n out = pd.DataFrame()\n for i in range(2):\n out = out.append(helper(c_first2[i], indicator_list[i], first2=True))\n \n for i in range(len(c_others)):\n out = out.append(helper(c_others[i], indicator_list[i+2], first2=False))\n\n out = out.loc[out['fk_simplecount_county'].isin(range(1,102+1))]\n return out[_SIMPLECOUNT_COLUMNS]\n except:\n raise", "def tabular_report(sample_dictionary, blast_dictionary):\n print(\"Writing the report...\")\n sample_dict = sample_dictionary.copy()\n blast_dict = blast_dictionary.copy()\n\n #creating quick dictionary to pull out trimmed sequence\n trimmed_data_dict={}\n for key in sample_dict.keys():\n try:\n trimmed_sequence=(sample_dict[key]['trimmed_sequence'])\n key = key.strip('@')\n trimmed_data_dict.update({key:trimmed_sequence})\n except KeyError:\n continue\n\n samples = []\n for sequenceID in sample_dict:\n samples.append(sequenceID[1:])\n \n records = blast_dict.keys()\n \n columns = [\"SeqID\", \"Trimmed Sequence\", \"Trimmed Sequence Length\",\"BLAST Sequence\", \n \"BLAST SeqLength\", \"Description\", \"Accession\", \"Db\", \n \"Score\", \"E_value\", \"Percent_Identity\", \"Organism\", \n \"Source\", \"Domain\", \"Taxonomy\"]\n \n #Writing\n OUT = open(\"blast_hits_report.txt\", \"w\")\n OUT.write('\\t'.join(columns) + '\\n')\n \n NO_HITS_OUT = open(\"blast_no_hits_report.txt\", \"w\")\n NO_HITS_OUT.write(\"SeqID\\tOriginal Seq\\tOriginal Seq Length\"\n \"\\tTrimmed Seq\\tTrimmed Seq Length\\tResult\\n\")\n\n for record in blast_dict.keys():\n\n trimmed_sequence = trimmed_data_dict[record]\n length_trimmed_sequence=len(trimmed_data_dict[record])\n \n #Used Brute force coding to be able to manipulate and add new columns to output\n try: \n OUT.write(str(record)\n +'\\t'+str(trimmed_sequence)\n +'\\t'+str(length_trimmed_sequence)\n +'\\t'+str(blast_dict[record]['Sequence'])\n +'\\t'+str(blast_dict[record]['SeqLength'])\n +'\\t'+str(blast_dict[record]['Description'])\n +'\\t'+str(blast_dict[record]['Accession'])\n +'\\t'+str(blast_dict[record]['Db'])\n +'\\t'+str(blast_dict[record]['Score'])\n +'\\t'+str(blast_dict[record]['E_value'])\n +'\\t'+str(blast_dict[record]['Percent_Identity'])\n +'\\t'+str(blast_dict[record]['Organism'])\n +'\\t'+str(blast_dict[record]['Source'])\n +'\\t'+str(blast_dict[record]['Domain'])\n +'\\t'+str(blast_dict[record]['Taxonomy'])+'\\n')\n except KeyError:\n continue\n\n for sample in samples:\n\n if sample not in records:\n sample_stripped = sample.split(\"\\t\")[0]\n\n #Get original trimmed sequence for reference\n try:\n trimmed_sequence = trimmed_data_dict[sample_stripped]\n length_trimmed_sequence=len(trimmed_sequence)\n except KeyError:\n trimmed_sequence = '-'\n length_trimmed_sequence=0\n\n NO_HITS_OUT.write(sample_stripped\n #Commend out original data being reported\n + '\\t' + sample_dict['@'+sample]['sequence'] \n + '\\t' + str(len(sample_dict['@'+sample]['sequence'])) \n + '\\t' + str(trimmed_sequence)\n + '\\t' + str(length_trimmed_sequence)\n\n + '\\t' + 'NO HIT OR SEQUENCE QUALITY BELOW THRESHOLD\\n')\n OUT.close()\n NO_HITS_OUT.close()", "def reformat_file(inFile, outFile):\n \n with open(outFile, \"w\") as outHandle:\n \n\t\t# write header line\n\t\toutLine = [\"g1\", \"g2\", \"raw_count\", \"log(obs/exp)\"]\n\t\t\n\t\toutHandle.write(\"\\t\".join(outLine) + \"\\n\")\n\n\n\t\tfor i, line in 
enumerate(open(inFile)):\n\t\t\t\n\t\t\tif not i == 0:\n\t\t\t\t\n\t\t\t\tsp = line.strip().split(\"\\t\")\n\t\t\t\t\n\t\t\t\t# get row interaction counts and normalized obs/exp values\n\t\t\t\trawCount = sp[12]\n\t\t\t\tobsExp = sp[13]\n\t\t\t\t\n\t\t\t\tgenes1 = sp[4].split(\"|\")\n\t\t\t\tgenes2 = sp[10].split(\"|\")\n\t\t\t\t\n\t\t\t\t#~ print(g1, g2, rawCount)\n\t\t\t\t\n\t\t\t\t# iterate over all pairs\n\t\t\t\tfor g1 in genes1:\n\n\t\t\t\t\tfor g2 in genes2:\n\t\t\t\t\t\t\n\t\t\t\t\t\toutLine = [g1, g2, rawCount, obsExp]\n\t\t\t\t\t\t\n\t\t\t\t\t\toutHandle.write(\"\\t\".join(outLine) + \"\\n\")", "def to_table(self):\n table = Table()\n table['THETA_LO'] = Quantity([self.counts.offset[:-1]], unit=self.counts.offset.unit)\n table['THETA_HI'] = Quantity([self.counts.offset[1:]], unit=self.counts.offset.unit)\n table['ENERG_LO'] = Quantity([self.counts.energy[:-1]], unit=self.counts.energy.unit)\n table['ENERG_HI'] = Quantity([self.counts.energy[1:]], unit=self.counts.energy.unit)\n table['counts'] = self.counts.to_table()['data']\n if self.counts.data_err is not None:\n table['counts_err'] = self.counts.to_table()['data_err']\n table['livetime'] = self.livetime.to_table()['data']\n table['bkg'] = self.bg_rate.to_table()['data']\n if self.bg_rate.data_err is not None:\n table['bkg_err'] = self.bg_rate.to_table()['data_err']\n table.meta['HDUNAME'] = \"bkg_2d\"\n return table", "def get_normalized_data_table(table_metadata, debug=False):\n suffix = table_metadata.get('suffix', '')\n data_table = table_metadata['table_class'](\n file_path=table_metadata['csv_filename'], suffix=suffix)\n drop_headers(table_metadata['document_label'], data_table.data)\n rename_headers(table_metadata['document_label'], data_table.data)\n print_data_table_length(table_metadata['document_label'],\n data_table.data,\n debug=debug)\n return data_table", "def _collect_counts(self, instance_list):\n \"\"\" Based on each instance, I augment empirical counts for every word and its BIO label in feature_count_table and for every transition from previous label to current label in transition_count_table.\n All \"rare words\" (those words that appear less than 3 times) are replaced by <UNK>.\n I also add label|START counts.\n \"\"\"\n # Build feature_count_table of V x labels and transition_count_table of labels x labels\n for instance in instance_list: # Set of <(w, pos), l>\n index = 0\n for t in instance.data: # Tuple of (w, pos)\n index = instance.data.index(t)\n # print t[0] # word\n # print instance.label[index] # label\n if t in self.V:\n self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])] +=1\n else:\n self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])] +=1\n if index > 0:\n self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] += 1\n else:\n self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])] += 1", "def fill_table(table, keep_unknowns=False):\n if not keep_unknowns:\n table.drop(table.index[table.phylum == \"unknown\"], inplace=True)\n table.drop(table.index[table.genus == \"unknown\"], inplace=True)\n table.drop(table.index[table.eggNOG == \"unknown\"], inplace=True)\n for cohort in table[\"cohort_origin\"].unique():\n cohort_table = table.loc[table[\"cohort_origin\"] == cohort]\n\n if cohort not in table_summary:\n table_summary[cohort] = {}\n\n if \"N\" not in table_summary[cohort]:\n table_summary[cohort][\"N\"] = 1\n else:\n 
table_summary[cohort][\"N\"] += 1\n\n if \"phylum\" not in table_summary[cohort]:\n table_summary[cohort][\"phylum\"] = pd.DataFrame(columns=[\"phylum\"])\n\n #count the number of unique values in the phylum column\n table_phylum = cohort_table.loc[table['genus'] != \"unknown\",['phylum']].apply(pd.Series.value_counts,axis=0)\n table_summary[cohort][\"phylum\"] = table_summary[cohort][\"phylum\"].add(table_phylum,fill_value=0)\n \n if \"genus\" not in table_summary[cohort]:\n table_summary[cohort][\"genus\"] = pd.DataFrame(columns=[\"genus\"])\n\n #count the number of unique values in the genus column\n table_genus = cohort_table.loc[table['genus'] != \"unknown\",['genus']].apply(pd.Series.value_counts,axis=0)\n table_summary[cohort][\"genus\"] = table_summary[cohort][\"genus\"].add(table_genus,fill_value=0)\n \n if \"eggNOG\" not in table_summary[cohort]:\n table_summary[cohort][\"eggNOG\"] = pd.DataFrame(columns=[\"eggNOG\"])\n\n index = 0\n cogs_table = {}\n\n #forced to do this by each row because one gene belongs to multiple cogs\n for row in cohort_table[\"eggNOG\"]:\n if \"gene_cog\" not in table_summary[cohort]:\n table_summary[cohort][\"gene_cog\"] = 0\n table_summary[cohort][\"gene_cog\"] += 1\n if \";\" in row:\n cogs = row.split(\";\")\n for cog in cogs:\n if cog not in cogs_table:\n cogs_table[cog] = 0\n cogs_table[cog] += 1\n else:\n if row not in cogs_table:\n cogs_table[row] = 0\n cogs_table[row] += 1\n\n #create data frame and create the table\n cog_pd = pd.DataFrame.from_dict(cogs_table, orient=\"index\")\n cog_pd.columns = [\"eggNOG\"]\n table_summary[cohort][\"eggNOG\"] = table_summary[cohort][\"eggNOG\"].add(cog_pd,fill_value=0)", "def revise_report_df(report_df):\n # quick check to see whether report_df column structure is as expected\n if report_df.columns.tolist() != ['0', '1', 'accuracy', 'macro avg', 'weighted avg']:\n print(\"Warning: Column names aren't as expected. Verify report_df output_dict is correct.\")\n\n report_df.columns = ['0', '1', 'accuracy', 'Macro Avg', 'Micro Avg' ]\n\n dict_columns = ['0', '1', 'Macro Avg', 'Micro Avg']\n keys = ['precision', 'recall', 'f1-score', 'support']\n\n for col in dict_columns:\n # revise key values to personalize to its associated column i.e. 
from 'precision' to 'precision_0'\n report_df[col] = report_df[col].apply(lambda x: customize_dict_keys(x, col, keys))\n\n # iterate row wise through dataframe to normalize dictionary values into flat tables\n new_dict = {str(classifier) + '_df': normalize_to_flat(classifier, report_df, col) for classifier in report_df.index.values.tolist()}\n\n # concat all classifier flat tables into one dataframe\n dict_df = pd.concat(list(new_dict.values())).reset_index().drop(columns=['index'], axis=1)\n\n # merge on existing report_df dataframe index and on dict_df 'classifier' column value\n report_df = report_df.merge(dict_df, how='left', left_index=True, left_on=None, right_on='classifier').set_index('classifier')\n\n # select only created columns\n report_df = report_df.iloc[:,5:]\n # sort columns and filter out 'support' related columns\n report_df = report_df[sorted([col for col in report_df.columns if 'support' not in col])]\n\n return report_df", "def _get_table_from_samples(self, index):\n df = pd.DataFrame()\n for sample in self.samples:\n sd = sample.to_dict()\n ser = pd.Series(\n {k: v for (k, v) in list(sd.items()) if not k.startswith(\"_\")}\n )\n df = df.append(ser, ignore_index=True)\n index = [index] if isinstance(index, str) else index\n if not all([i in df.columns for i in index]):\n _LOGGER.debug(\n \"Could not set {} index. At least one of the \"\n \"requested columns does not exist: {}\".\n format(CFG_SAMPLE_TABLE_KEY, index))\n return df\n _LOGGER.debug(\"Setting sample_table index to: {}\".format(index))\n df.set_index(keys=index, drop=False, inplace=True)\n return df", "def produce_mirna_unique_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.uniq.bam.mirbase_counts.txt\")", "def prepare_batch_pairsets_for_metadata_export(all_samples, pairs, tsca_id):\n tn_pairs = pairs[(pairs['match_type'] == \"tumor_normal\") & (pairs['case_sample_tsca_id']==tsca_id)]\n tp_pairs = pairs[(pairs['match_type'] == \"tumor_primary\") & (pairs['case_sample_tsca_id']==tsca_id)]\n\n tn_pairsets = pd.merge(tn_pairs, all_samples[['entity:sample_id', 'tsca_id']], \\\n left_on='case_sample_id', right_on='entity:sample_id', \\\n how='inner')[['tsca_id', 'entity:pair_id']] \\\n .rename(columns={'tsca_id': 'membership:pair_set_id', 'entity:pair_id': 'pair_id'})\n\n tp_pairsets = pd.merge(tp_pairs, all_samples[['entity:sample_id', 'tsca_id']], \\\n left_on='case_sample_id', right_on='entity:sample_id', \\\n how='inner')[['tsca_id', 'entity:pair_id']] \\\n .rename(columns={'tsca_id': 'membership:pair_set_id', 'entity:pair_id': 'pair_id'})\n\n # Append _TN/_TP to the end of the tumor-normal/tumor-primary pair set ids\n tn_pairsets['membership:pair_set_id'] = tn_pairsets['membership:pair_set_id'].apply(lambda x: \"%s_TN\"%x)\n tp_pairsets['membership:pair_set_id'] = tp_pairsets['membership:pair_set_id'].apply(lambda x: \"%s_TP\"%x)\n \n return (tn_pairsets, tp_pairsets)", "def getAllSampleFields(self, sample_id, study_id):\n sample_tables = []\n sample_tables.append('sample')\n sample_tables.append('common_fields')\n sample_tables.append('extra_sample_')\n sample_tables.append('air')\n sample_tables.append('other_environment')\n sample_tables.append('sediment')\n sample_tables.append('soil')\n sample_tables.append('wastewater_sludge')\n sample_tables.append('water')\n sample_tables.append('host_assoc_vertibrate')\n sample_tables.append('host_associated_plant')\n sample_tables.append('human_associated')\n sample_tables.append('host_sample')\n 
sample_tables.append('host')\n \n filled_fields = {}\n \n con = self.getMetadataDatabaseConnection()\n cursor = con.cursor()\n \n for table in sample_tables:\n if 'extra_sample_' in table:\n statement = 'select * from %s%s where sample_id = %s' % (table, study_id, sample_id)\n elif table == 'host':\n statement = 'select * from host h inner join host_sample hs on h.host_id = hs.host_id where sample_id = %s' % sample_id\n else:\n statement = 'select * from %s where sample_id = %s' % (table, sample_id)\n \n try:\n cursor.execute(statement)\n except Exception, e:\n print str(e)\n print 'Error running query:\\n'\n print statement\n print 'Running next query...\\n'\n \n continue\n \n data = cursor.fetchall()\n\n # Get the column names\n col_names = []\n for i in range(0, len(cursor.description)):\n col_names.append(cursor.description[i][0])\n \n # Find the rows with data\n for row in data:\n i = 0\n for field in row:\n if field != None and field != '':\n filled_fields[col_names[i]] = field\n i += 1\n \n return filled_fields", "def summarize_sample_props(psd_list, sample_list):\n prop_list = [psd.sample_props for psd in psd_list]\n cols = ['amplicon median', 'mean size', 'lower size', 'upper size']\n\n return pd.DataFrame(prop_list, columns=cols, index=sample_list)", "def column_creator(path):\n if not os.path.exists(path+'tables'):\n os.makedirs(path+'tables')\n\n\n # Sequences\n if os.path.exists(path+'SEQ.txt'):\n with open(os.path.join(path+'SEQ.txt')) as f1, open(os.path.join(path+'tables/sequences_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n # Modifications\n if os.path.exists(path + 'modifications.txt'):\n\n with open(os.path.join(path+'modifications.txt')) as f1, open(os.path.join(path+'tables/modifications_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n # Spectrum identify:\n if os.path.exists(path + 'spectrum_identify.txt'):\n\n with open(os.path.join(path+'spectrum_identify.txt')) as f1, open(path+'tables/spectrum_ide_table.txt', 'a') as f3:\n lines1 = f1.read().count('\\n')\n f3.write(\"%s\\n%s\\n\" % (\"Spectrum Number\",lines1))\n f1.close()\n f3.close()\n\n if os.path.exists(path + 'spectrum_unidentify.txt'):\n with open(os.path.join(path+'spectrum_unidentify.txt')) as f2, open(path+'tables/spectrum_unide_table.txt', 'a') as f3:\n lines2 = f2.read().count('\\n')\n f3.write(\"%s\\n%s\\n\" % (\"Spectrum Number\",lines2))\n f2.close()\n f3.close()\n\n if os.path.exists(path+'taxonomy_identify.txt'):\n # Taxonomy ide:\n with open(os.path.join(path+'taxonomy_identify.txt')) as f1, open(os.path.join(path+'tables/taxonomy_ide_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n\n if os.path.exists(path + 'taxonomy_unidentify.txt'):\n # Taxonomy unide:\n with open(os.path.join(path+'taxonomy_unidentify.txt')) as f1, open(os.path.join(path+'tables/taxonomy_unide_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()", "def finalize(param, input_files='count_files'):\n\n import csv\n HELPER.writeLog('Collecting featureCount raw counts ... 
\\n', param)\n\n #check which of these files are actually available\n working_files = [iFile for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get feature ID using the first column in the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #For featureCount output, we want to skip the first two lines as they\n #include the featureCount call and the headers which we don't want\n next(csv_reader, None)\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n counts = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the expression values\n header = 'ID'\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n csv_file = open(param[input_files][idx])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #Here too we want to skip the first two lines, before getting the counts\n next(csv_reader, None)\n next(csv_reader, None)\n #Now start getting the counts (row[6]) and add in the ID (counts[i]) before it\n idx = 0\n for row in csv_reader:\n counts[idx] = counts[idx]+'\\t'+row[6]\n idx += 1\n csv_file.close()\n\n #output the file\n out_file = param['working_dir']+'deliverables/featureCount_raw_counts.txt'\n out_handle = open(out_file, 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(counts)):\n out_handle.write(counts[i]+'\\n')\n out_handle.close()\n\n #output_phenotype_file\n HELPER.writeLog('Writing phenotype data ... \\n', param)\n MODULE_HELPER.output_sample_info(param)\n\n #write summary stats\n #featureCount does this on its own so we can just fetch each summary file\n #check which of these files are actually available\n working_files = [iFile+'.summary' for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get Status column from summary file using the first column in\n #the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Here, we want to skip the first line, as it simply points to the\n #alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n entry = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the summary stats for each sample\n header = 'Status'\n\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n #Fetch the corresponding sample's summary file\n csv_file = open(param[input_files][idx]+'.summary')\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Again, we want to skip the first line, as it simply points\n #to the alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start getting the stats (row[1]) and add in the Status\n # (counts[i]) before it\n i = 0\n for row in csv_reader:\n entry[i] = entry[i]+'\\t'+row[1]\n i += 1\n csv_file.close()\n #output the file\n out_handle = open(param['working_dir']+\n 'results/featureCount/featureCount_stats.txt',\n 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(entry)):\n out_handle.write(entry[i]+'\\n')\n out_handle.close()\n else:\n print 'featureCount was not run successfully on any of the files..\\n'", "def test_toTable(self):\r\n # Empty results.\r\n out_f = StringIO()\r\n self.res1.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(),\r\n 
\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\\n\")\r\n out_f.close()\r\n\r\n # Results with multiple samples.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\nS1\\t10\\t20\\t2.5\\t2.5\\t3.5\r\nS1\\t20\\t30\\t3.5\\t2.5\\t3.5\r\nS2\\t1\\t3\\t0.4\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res2.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Custom header.\r\n exp = \"\"\"foo\\tbar\\tbaz\\tbazaar\\tbazaaar\\tbazaaaar\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res1.addSample('S1', 42)\r\n self.res1.addSampleEstimate('S1', 5, 21, 1.5, 2.5, 3.5)\r\n self.res1.toTable(out_f,\r\n header=['foo', 'bar', 'baz', 'bazaar', 'bazaaar', 'bazaaaar'])\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Invalid header.\r\n with self.assertRaises(ValueError):\r\n out_f = StringIO()\r\n self.res1.toTable(out_f, header=['foo'])\r\n\r\n # Cells with None as their value.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t43\\tN/A\\tN/A\\tN/A\\tN/A\r\n\"\"\"\r\n out_f = StringIO()\r\n res = RichnessEstimatesResults()\r\n res.addSample('S1', 42)\r\n res.addSampleEstimate('S1', 43, None, None, None, None)\r\n res.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()", "def split_otu_table_on_sample_metadata(otu_table_f, mapping_f, mapping_field):\r\n mapping_f = list(mapping_f)\r\n mapping_values = get_mapping_values(mapping_f, mapping_field)\r\n otu_table = parse_biom_table(otu_table_f)\r\n\r\n for v in mapping_values:\r\n v_fp_str = v.replace(' ', '_')\r\n sample_ids_to_keep = sample_ids_from_metadata_description(\r\n mapping_f, valid_states_str=\"%s:%s\" % (mapping_field, v))\r\n\r\n try:\r\n filtered_otu_table = otu_table.filterSamples(\r\n lambda values, id_, metadata: id_ in sample_ids_to_keep)\r\n except TableException:\r\n # all samples are filtered out, so no otu table to write\r\n continue\r\n yield v_fp_str, format_biom_table(filtered_otu_table)", "def _prepare_stats_table(self, pinfos):\n\n stats_tbl = OrderedDict()\n stats_tbl[\"Title\"] = OrderedDict()\n for res in self.rsts:\n stats_tbl[res.reportid] = OrderedDict()\n\n for pinfo in pinfos:\n for colname in (pinfo[\"colname\"], pinfo[\"xcolname\"]):\n if colname in stats_tbl[\"Title\"]:\n continue\n\n # Each column name is represented by a row in the statistics table. 
Fill the \"Title\"\n # column.\n title_dict = stats_tbl[\"Title\"][colname] = OrderedDict()\n defs = self._refdefs.info[colname]\n\n if defs.get(\"unit\") == \"nanosecond\":\n # Convert nanoseconds to microseconds.\n unit = \"us\"\n else:\n unit = defs.get(\"short_unit\", \"\")\n\n title_dict[\"colname\"] = colname\n if unit:\n title_dict[\"colname\"] += f\", {unit}\"\n title_dict[\"coldescr\"] = defs[\"descr\"]\n\n title_dict[\"funcs\"] = OrderedDict()\n for funcname in self._stats_funcs:\n if funcname in self.rsts[0].cstats[colname]:\n title_dict[\"funcs\"][funcname] = RORawResult.get_stat_func_descr(funcname)\n\n # Now fill the values for each result.\n for res in self.rsts:\n res_dict = stats_tbl[res.reportid][colname] = OrderedDict()\n res_dict[\"funcs\"] = OrderedDict()\n\n for funcname in title_dict[\"funcs\"]:\n val = res.cstats[colname][funcname]\n fmt = \"{}\"\n if defs.get(\"unit\") == \"nanosecond\" and \"index\" not in funcname:\n val /= 1000\n fmt = \"{:.2f}\"\n if defs[\"type\"] == \"float\":\n fmt = \"{:.2f}\"\n\n fdict = res_dict[\"funcs\"][funcname] = OrderedDict()\n fdict[\"val\"] = fmt.format(val)\n fdict[\"raw_val\"] = val\n\n if self._refres.reportid == res.reportid:\n fdict[\"hovertext\"] = \"This is the reference result, other results \" \\\n \"are compared to this one.\"\n continue\n\n ref_fdict = stats_tbl[self._refres.reportid][colname][\"funcs\"][funcname]\n change = val - ref_fdict[\"raw_val\"]\n if ref_fdict[\"raw_val\"]:\n percent = (change / ref_fdict[\"raw_val\"]) * 100\n else:\n percent = change\n change = fmt.format(change) + unit\n percent = \"{:.1f}%\".format(percent)\n fdict[\"hovertext\"] = f\"Change: {change} ({percent})\"\n\n return stats_tbl", "def summarize(crosswalk, incidence_table, control_spec):\n\n include_integer_colums = not setting('NO_INTEGERIZATION_EVER', False)\n\n crosswalk_df = crosswalk.to_frame()\n incidence_df = incidence_table.to_frame()\n\n geographies = setting('geographies')\n seed_geography = setting('seed_geography')\n meta_geography = geographies[0]\n sub_geographies = geographies[geographies.index(seed_geography) + 1:]\n hh_id_col = setting('household_id_col')\n\n meta_ids = crosswalk_df[meta_geography].unique()\n for meta_id in meta_ids:\n meta_summary_df = \\\n meta_summary(incidence_df, control_spec, meta_geography,\n meta_id, sub_geographies, hh_id_col)\n out_table('%s_%s' % (meta_geography, meta_id), meta_summary_df)\n\n hh_weights_summary = pd.DataFrame(index=incidence_df.index)\n\n # add seed level summaries\n seed_weights_df = get_weight_table(seed_geography)\n hh_weights_summary['%s_balanced_weight' % seed_geography] = seed_weights_df['balanced_weight']\n if include_integer_colums:\n hh_weights_summary['%s_integer_weight' % seed_geography] = seed_weights_df['integer_weight']\n\n for geography in sub_geographies:\n\n weights_df = get_weight_table(geography)\n\n if weights_df is None:\n continue\n\n if include_integer_colums:\n hh_weight_cols = [hh_id_col, 'balanced_weight', 'integer_weight']\n else:\n hh_weight_cols = [hh_id_col, 'balanced_weight']\n\n hh_weights = weights_df[hh_weight_cols].groupby([hh_id_col]).sum()\n hh_weights_summary['%s_balanced_weight' % geography] = hh_weights['balanced_weight']\n if include_integer_colums:\n hh_weights_summary['%s_integer_weight' % geography] = hh_weights['integer_weight']\n\n # aggregate to seed level\n aggegrate_weights = weights_df.groupby([seed_geography, hh_id_col], as_index=False).sum()\n aggegrate_weights.set_index(hh_id_col, inplace=True)\n\n if 
include_integer_colums:\n aggegrate_weight_cols = [seed_geography, 'balanced_weight', 'integer_weight']\n else:\n aggegrate_weight_cols = [seed_geography, 'balanced_weight']\n\n aggegrate_weights = aggegrate_weights[aggegrate_weight_cols]\n aggegrate_weights['sample_weight'] = incidence_df['sample_weight']\n aggegrate_weights['%s_preliminary_balanced_weight' % seed_geography] = \\\n seed_weights_df['preliminary_balanced_weight']\n aggegrate_weights['%s_balanced_weight' % seed_geography] = \\\n seed_weights_df['balanced_weight']\n if include_integer_colums:\n aggegrate_weights['%s_integer_weight' % seed_geography] = \\\n seed_weights_df['integer_weight']\n\n out_table('%s_aggregate' % (geography,), aggegrate_weights)\n\n summary_col = 'integer_weight' if include_integer_colums else 'balanced_weight'\n df = summarize_geography(seed_geography, summary_col, hh_id_col,\n crosswalk_df, weights_df, incidence_df)\n out_table('%s_%s' % (geography, seed_geography,), df)\n\n df = summarize_geography(geography, summary_col, hh_id_col,\n crosswalk_df, weights_df, incidence_df)\n out_table('%s' % (geography,), df)\n\n out_table('hh_weights', hh_weights_summary)", "def accumulate_entries_as_tables(entries):\n name_table = {}\n num_table = {}\n for number, name in entries:\n name_table[name] = number\n num_table[number] = name\n\n return name_table, num_table", "def table_summary():\n \n t = dict()\n t['name'] = get_names()\n t['Name'] = [get_properties(name)['label'] for name in t['name']]\n N = len(t['name'])\n \n # host\n t['host'] = ['Sagittarius', 'Sagittarius', 'none', 'Gaia-Sausage-Enceladus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Sequoia / Arjuna', np.nan, np.nan, 'Sequoia / Arjuna', 'Gaia-Sausage-Enceladus', 'Sequoia / Arjuna', 'Helmi / Wukong', 'Helmi / Wukong', 'Sagittarius', 'in situ / Helmi / Wukong', 'Helmi / Wukong', 'Cetus', 'Cetus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Cetus', 'Sequoia / Arjuna / I\\'itoi']\n \n # progenitor\n t['progenitor'] = [np.nan, np.nan, 'itself', 'NGC 5139', 'NGC 4590', np.nan, 'NGC 3201', '(Wukong / Helmi)', '(Wukong / Helmi)', np.nan, np.nan, np.nan, np.nan, 'NGC 5024', np.nan, 'NGC 5272', 'NGC 5024', 'NGC 5824', 'NGC 5824', np.nan, np.nan, np.nan, np.nan]\n \n # progenitor type\n t['type'] = ['DG' if name in ['elqui', 'indus', 'jhelum'] else 'GC' for name in t['name']]\n \n # metallicity\n t['feh'] = [-2.4, -2.4, -2.2, -1.5, -2.16, -2.3, -1.5, -2.1, -2.1, -1.6, -1.95, -1.6, -2.7, np.nan, -1.7, -1.1, -2.7, -1.9, np.nan, np.nan, -2.2, np.nan, -1.9]\n \n # associations\n t['friends'] = ['ATLAS', 'Aliqa Uma', np.nan, np.nan, np.nan, np.nan, np.nan, 'Jhelum', 'Indus', np.nan, np.nan, np.nan, np.nan, 'Sylgr', np.nan, np.nan, 'Ravi', 'Turbio', 'Triangulum', np.nan, np.nan, np.nan, np.nan]\n \n tout = Table(t)\n tout.pprint()\n tout.write('../data/stream_origin.fits', overwrite=True)", "def squash_records(key, sample_lookup, sample_ids, sample_indexes, normal_id):\n chrom, pos, ref, alt = key\n ids = []\n quals = []\n filters = []\n infos = []\n formats = []\n gtypes = {sid: [] for sid in sample_ids}\n caller_flags = {sid: collections.OrderedDict([(key, None)\n for key in CALLER_CODES])\n for sid in sample_ids}\n for sample_id, caller_lookup in sample_lookup.iteritems():\n caller_ids = []\n caller_quals = []\n caller_filters = []\n caller_infos = []\n for caller, records in caller_lookup.iteritems():\n # Squash repeat calls on a sample (e.g. 
FreeBayes single/somatic)\n caller_ids.append(consense_values([rec.ID for rec in records]))\n caller_quals.append(consense_values([rec.QUAL for rec in records]))\n caller_filters.extend(chain(*[rec.FILTER\n for rec in records if rec.FILTER]))\n caller_infos.append(consense_dicts([rec.INFO for rec in records]))\n formats.extend(chain(*[rec.FORMAT.split(':') for rec in records]))\n calls = [rec.genotype(sample_id) for rec in records]\n gtypes[sample_id].extend(calls) # ENH: consense\n caller_code = CALLER_NAMES[caller]\n caller_val = max(call.is_variant for call in calls)\n if caller_val is not None:\n caller_val = int(caller_val)\n caller_flags[sample_id][caller_code] = caller_val\n ids.extend(caller_ids)\n quals.extend(caller_quals)\n filters.extend(caller_filters)\n infos.extend(caller_infos)\n formats = list(unique(formats))\n out_record = Record(chrom, pos, consense_values(ids),\n ref, [alt], consense_values(quals),\n list(unique(filters)),\n consense_dicts(infos),\n \":\".join(formats + CALLER_CODES),\n sample_indexes)\n out_record.samples = [Call(out_record, sid,\n make_call_data(formats, gtypes[sid],\n caller_flags[sid]))\n for sid in sample_ids]\n\n # Post-processing\n # Fix AN and AC, which are defined specially in the VCF spec\n all_alleles = list(chain(*[gt.gt_alleles for gt in out_record.samples\n if gt.gt_nums]))\n # AN (1): \"Total number of alleles in called genotypes\"\n out_record.INFO[\"AN\"] = sum(a != '.' for a in all_alleles)\n # AC (A): \"Total number of alternate alleles in called genotypes\"\n # ENH: don't crash on multiallelics?\n out_record.INFO[\"AC\"] = sum(sum(gta not in ('.', '0') for gta in gt.gt_alleles)\n for gt in out_record.samples\n if gt.gt_nums)\n\n # NB: Last 2 validation errors:\n # CGP-1240 FORMAT tag [GL] expected different number of values (expected 2, found 3)\n # chr2\t209113113\t.\tG\tA\t2714.98\tPASS\tSOMATIC;NS=1;DP=653;DPB=716.75;AC=1;AN=3;AF=0.375;RO=590;AO=123;PRO=0.0;PAO=0.0;QR=21870;QA=4741;PQR=0.0;PQA=0.0;SRF=386;SRR=203;SAF=90;SAR=33;SRP=127.1;SAP=60.3689;AB=0.233397;ABP=328.364;RUN=1;RPP=24.6368;RPPR=13.5915625;RPL=44.0;RPR=79.0;EPP=4.44029;EPPR=8.549435;DPRA=1.56225;ODDS=272.57225;GTI=0;TYPE=snp;CIGAR=1X;NUMALT=1;MEANALT=2.0;LEN=1;MQM=60.0;MQMR=60.0177;PAIRED=0.95122;PAIREDR=0.958916;technology.ILLUMINA=1.0;MQ0=0;VT=SNP;BaseQRankSum=3.369;Dels=0.0;FS=2.338;HaplotypeScore=31.1133;MLEAC=1;MLEAF=0.5;MQ=60.02;MQRankSum=0.461;QD=7.05;ReadPosRankSum=-0.95;SOR=0.953;SF=0\tGT:AD:BQ:DP:FA:SS:GMIMUT:GMIMAF:GMICOV:GQ:RO:QR:AO:QA:GL:PL:CallHC:CallUG:CallFB:CallPI:CallSID:CallMU:LR\t0/1:387,117:38.0:506:0.239:2:117:23.0:504:120.456:401:14917:123:4741:-285.03,0.0,-1199.66:3594,0,14408:.:1:1:.:.:1:0.0723\t0:200,0:.:227:0.0:0:0:0.0:226:141.912:252:9271:0:0:0.0,-66.9517,-832.671:.:.:.:0:.:.:0:0.0607\n # 
chr5\t176715884\t.\tT\tG\t2652.62\tPASS\tSOMATIC;NS=1;DP=718;DPB=806.5;AC=1;AN=3;AF=0.375;RO=684;AO=122;PRO=0.0;PAO=0.0;QR=26587;QA=4642;PQR=0.0;PQA=0.0;SRF=334;SRR=350;SAF=62;SAR=60;SRP=3.803265;SAP=3.0815;AB=0.225508;ABP=357.065;RUN=1;RPP=5.57335;RPPR=3.3676575;RPL=67.0;RPR=55.0;EPP=4.14943;EPPR=9.662735;DPRA=1.1461875;ODDS=318.98275;GTI=0;TYPE=snp;CIGAR=1X;NUMALT=1;MEANALT=1.0;LEN=1;MQM=60.0;MQMR=60.0;PAIRED=0.991803;PAIREDR=0.97694275;technology.ILLUMINA=1.0;MQ0=0;VT=SNP;BaseQRankSum=-2.525;Dels=0.0;FS=0.0;HaplotypeScore=15.7349;MLEAC=1;MLEAF=0.5;MQ=60.0;MQRankSum=2.816;QD=6.74;ReadPosRankSum=1.308;SOR=0.691;SF=0\tGT:AD:BQ:DP:FA:SS:GMIMUT:GMIMAF:GMICOV:GQ:RO:QR:AO:QA:GL:PL:CallHC:CallUG:CallFB:CallPI:CallSID:CallMU:LR\t0/1:408,121:37.0:529:0.22:2:121:23.0:529:117.5855:419:16330:122:4642:-270.938,0.0,-1322.0:3592,0,16251:.:1:1:.:.:1:0.022\t0:284,0:.:319:0.0:0:0:0.0:319:136.171:354:13676:0:0:0.0,-95.905,-1230.06:.:.:.:0:.:.:0:0.0006\n # * FreeBayes somatic produces the 3 GL values like this, w/e\n # chr2\t209113113\t.\tG\tA\t2714.98\t.\tAB=0.233397;ABP=328.364;AC=1;AF=0.25;AN=4;AO=123;CIGAR=1X;DP=780;DPB=780;DPRA=2.083;EPP=4.44029;EPPR=9.74419;GTI=0;LEN=1;MEANALT=2;MQM=60;MQMR=60.0153;NS=2;NUMALT=1;ODDS=155.143;PAIRED=0.95122;PAIREDR=0.960184;PAO=0;PQA=0;PQR=0;PRO=0;QA=4741;QR=24188;RO=653;RPL=44;RPP=24.6368;RPPR=15.384;RPR=79;RUN=1;SAF=90;SAP=60.3689;SAR=33;SRF=424;SRP=129.458;SRR=229;TYPE=snp;technology.ILLUMINA=1;SOMATIC\tGT:GQ:DP:RO:QR:AO:QA:GL:GMIMUT:GMIMAF:GMICOV\t0/1:141.912:527:401:14917:123:4741:-285.03,0,-1199.66:123:23:524\t0/0:141.912:253:252:9271:0:0:0,-66.9517,-832.671:0:0:252\n # chr5\t176715884\t.\tT\tG\t2652.62\t.\tAB=0.225508;ABP=357.065;AC=1;AF=0.25;AN=4;AO=122;CIGAR=1X;DP=895;DPB=895;DPRA=1.52825;EPP=4.14943;EPPR=7.73248;GTI=0;LEN=1;MEANALT=1;MQM=60;MQMR=60;NS=2;NUMALT=1;ODDS=221.81;PAIRED=0.991803;PAIREDR=0.978008;PAO=0;PQA=0;PQR=0;PRO=0;QA=4642;QR=30006;RO=773;RPL=67;RPP=5.57335;RPPR=3.48505;RPR=55;RUN=1;SAF=62;SAP=3.0815;SAR=60;SRF=377;SRP=4.0244;SRR=396;TYPE=snp;technology.ILLUMINA=1;SOMATIC\tGT:GQ:DP:RO:QR:AO:QA:GL:GMIMUT:GMIMAF:GMICOV\t0/1:136.171:541:419:16330:122:4642:-270.938,0,-1322:122:23:541\t0/0:136.171:354:354:13676:0:0:0,-95.905,-1230.06:0:0:354\n\n # Set SOMATIC flag (only add, don't remove existing flags)\n global CNT_ATYPICAL\n global CNT_TYPICAL\n if normal_id:\n normal_gt = out_record.genotype(normal_id)\n if ((not normal_gt.is_variant) and\n (not out_record.INFO.get(\"SOMATIC\")) and\n any(gt.is_variant for gt in out_record.samples)):\n out_record.INFO[\"SOMATIC\"] = True\n if normal_gt.gt_nums is None:\n CNT_TYPICAL += 1\n else:\n CNT_ATYPICAL += 1\n\n return out_record", "def summarize(data, verbal=False, using_files=True):\n\n if using_files:\n for file_name in tqdm(data):\n fill_table(pd.read_csv(file_name))\n else:\n for table in tqdm(data):\n fill_table(table)\n\n for cluster in table_summary:\n #total_genes = sum(table_summary[cluster][\"phylum\"].values) # number of genes\n #total_genes = table_summary[cluster][\"N\"] # number of samples\n total_genes = table_summary[cluster][\"eggNOG\"].eggNOG.sum() # number of genes in COGs with duplicates\n \n phylum_percent = table_summary[cluster][\"phylum\"].apply(lambda x: x/total_genes * 100)\n phylum_percent.columns = [\"percent\"]\n table_summary[cluster][\"phylum\"] = pd.concat([table_summary[cluster][\"phylum\"],phylum_percent],axis=1)\n\n #Read above for fix\n genus_percent = table_summary[cluster][\"genus\"].apply(lambda x: x/total_genes * 100)\n genus_percent.columns = [\"percent\"]\n 
table_summary[cluster][\"genus\"] = pd.concat([table_summary[cluster][\"genus\"],genus_percent],axis=1)\n\n #read above for fix\n cog_percent = table_summary[cluster][\"eggNOG\"].apply(lambda x: x/table_summary[cluster][\"gene_cog\"] * 100)\n cog_percent.columns = [\"percent\"]\n table_summary[cluster][\"eggNOG\"] = pd.concat([table_summary[cluster][\"eggNOG\"],cog_percent],axis=1)\n\n #Print the data\n if verbal:\n print \"Cluster %s:\\n\" % cluster\n print \"Number of Samples: %d\\n\" % table_summary[cluster][\"N\"]\n print \"Taxonomy:\"\n print table_summary[cluster][\"phylum\"].sort(\"percent\", ascending=False)\n print \"----------------------------------\"\n print table_summary[cluster][\"genus\"].sort(\"percent\", ascending=False)\n print \"-----------------------------------\"\n print \"COGS:\"\n print table_summary[cluster][\"eggNOG\"].sort(\"percent\", ascending=False)\n print \"------------------------------------\"\n print \"End Summary\"", "def table_stats(self, db, dest, kvargs, lines):\n if 'table' in kvargs:\n tables = [db.get_table(kvargs['table'])]\n else:\n tables = db.tables()\n options = kvargs.get('options','')\n done = False\n for table in db.tables():\n print(\"======================= {} =======================\".format(table.name))\n if 'dump' in options:\n print(\"schema dump:\")\n table.dump()\n print(\"\")\n if 'head' in options:\n print(\"First 5 records:\")\n for source_record in db.read_records_as_dicts(tablename=table.name, limit=5):\n print(source_record)\n print(\"\")\n # Compute single-variable stats on each of the variables\n sw = stopwatch().start()\n print(\"Computing statistics...\")\n stats = {}\n census_checksum = 0\n \n if self.spark_context:\n print(\"Using spark to read {} ... assuming first line has headings\".format(table.filename))\n sc = self.spark_context\n data = sc.textFile(table.filename)\n header = data.first() # extract the header\n stats = data.filter(lambda row:row!=header).map(table.parse_line_to_dict).reduce(stats_reducer)\n else:\n try:\n for source_record in db.read_records_as_dicts(tablename=table.name,limit=self.limit):\n if source_record['RECTYPE']=='P':\n census_checksum += census_person_polynominal(source_record)\n stats = stats_reducer(source_record, stats)\n except KeyboardInterrupt as e:\n print(\"*** KeyboardInterrupt at count: {}\".format(stats[':count']))\n done = True\n if stats:\n print(\"total records: {} speed: {:8.0f} records/sec\".format( stats[':count'], stats[':count']/sw.elapsed()))\n tt = tytable.ttable()\n tt.add_head(['variable','min','avg','max'])\n tt.set_col_alignment(1,tytable.ttable.RIGHT)\n tt.set_col_alignment(2,tytable.ttable.RIGHT)\n tt.set_col_alignment(3,tytable.ttable.RIGHT)\n for key in stats_variable_names(stats):\n try:\n tt.add_data([key, stats[key+\":min\"], stats[key+\":sum\"] / stats[':count'], stats[key+\":max\"]])\n except TypeError:\n tt.add_data([key, stats[key+\":min\"], \"\", stats[key+\":max\"]])\n print(tt.typeset(mode=tytable.TEXT))\n if census_checksum:\n print(\"Census checksum: {}\".format(census_checksum))\n print(\"\")\n if done:\n return True # had the keyboard abort\n return True", "def print_all_counts_as_shasta_matrix(all_counts, max_count=50, pseudocount=1):\n a_t_counts = all_counts[\"A\"] + all_counts[\"T\"]\n g_c_counts = all_counts[\"G\"] + all_counts[\"C\"]\n\n total = 0\n for i in range(max_count + 1):\n total += max(pseudocount, a_t_counts[i])\n\n line = list()\n for i in range(max_count + 1):\n count = max(pseudocount, a_t_counts[i])\n line.append(\"%.9f\" % 
math.log((count/total),10))\n\n print(\">AT prior\")\n print(\",\".join(line))\n print()\n\n total = 0\n for i in range(max_count + 1):\n total += max(pseudocount, g_c_counts[i])\n\n line = list()\n for i in range(max_count + 1):\n count = max(pseudocount, g_c_counts[i])\n line.append(\"%.9f\" % math.log((count/total),10))\n\n print(\">GC prior\")\n print(\",\".join(line))\n print()", "def _create_metrics_table(font, format, base, create_glyph_metrics):\n # we don't set PCF_COMPRESSED_METRICS\n metrics = tuple(create_glyph_metrics(_g, base) for _g in font.glyphs)\n table_bytes = (\n bytes(le.uint32(format))\n + bytes(base.int32(len(metrics)))\n + b''.join(bytes(_t) for _t in metrics)\n )\n return table_bytes, format", "def annotate_tsv_freq(in_tsv_gz,annotation_tsv):\n sys.stderr.write(\"Reading TSV file ...\\n\")\n nicollo = pd.read_csv(BOLLI, sep=\"\\t\")\n nicollo = nicollo.iloc[:,[1,2,4,5,23]]\n nicollo_counts = nicollo.groupby(['CHR','START'])['MT'].count()\n nol_var = nicollo.drop(['WT','MT'], axis = 1) \n nol_var = nol_var.set_index(['CHR', 'START'])\n\n #nicollo_counts = nicollo.groupby([\"CHR\",\"START\",\"WT\",\"MT\"]).size().reset_index(name=\"count\")\n #nicollo_counts = nicollo_counts[[\"CHR\", \"START\",\"count\"]].set_index(['CHR','START'])\n\n mmrf = pd.read_csv('/ifs/res/leukgen/home/yellapav/MMRF/MMRF_CoMMpass_IA9_All_Canonical_Variants.txt', sep=\"\\t\")\n mmrf=mmrf.iloc[:,[0,1,2,4,5,19,23]]\n mmrf=mmrf.drop_duplicates()\n\n mmrfM=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].median()\n mmrfC=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].count()\n mmrfQ25=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].quantile(q=0.25)\n mmrfQ75=mmrf.groupby(['CHROM','POS'])['GEN[1].AR'].quantile(q=0.75)\n \n\n #anno_tsv = pd.read_csv(annotation_tsv, comment='#',sep=\"\\t\")\n anno_tsv = pd.read_csv(annotation_tsv, comment='#',sep=\"\\t\", low_memory=False)\n #anno_tsv[anno_tsv['FILTER'] == \"PASS\"]\n counts_tsv=anno_tsv.groupby([\"CHR\",\"START\",\"REF\",\"ALT\"]).size().reset_index(name=\"count\")\n counts_tsv=counts_tsv[[\"CHR\", \"START\",\"count\"]].set_index(['CHR','START'])\n counts_median=anno_tsv.groupby(['CHR','START'])['TARGET_VAF'].median()\n\n\n\n inFile = gzip.open(in_tsv_gz,'r')\n \n sys.stderr.write(\"Annotating ...\\n\")\n for record in inFile:\n record=record.decode(\"utf-8\")\n record=record.rstrip()\n recArr=record.split(\"\\t\")\n \n cl = [] \n freq = [] \n medVAF = [] \n Q25 = [] \n Q75 = [] \n positions = [] \n normal = \"0\" \n normalVAF = \"0\" \n bolli_cl = [] \n bolli_freq = [] \n bolli_positions = [] \n bolli_anno = [] \n flag = 0\n bolli_flag = 0\n if record.startswith(\"#\"):\n continue\n\n if recArr[0] == \"ID_VARIANT\":\n cl = \"MMRF_Class\"\n freq = \"MMRF_Frequency\"\n medVAF = \"MMRF_VAF\"\n Q25 = \"MMRF_Q25\"\n Q75 = \"MMRF_Q75\"\n positions = \"MMRF_Positions\"\n normal = \"Normals_Frequency\"\n normalVAF = \"Normals_median_VAF\"\n bolli_cl = \"Bolli_Class\"\n bolli_freq = \"Bolli_Frequency\"\n bolli_positions = \"Bolli_Positions\"\n bolli_anno = \"Bolli_Annotation\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions, bolli_cl, bolli_freq, bolli_anno, bolli_positions, normal, normalVAF ]\n record = (\"\\t\".join(record))\n print(record)\n continue\n\n try:\n chrom = str(recArr[3])\n pos = int(recArr[4])\n start = int(recArr[4]) - 9\n end = int(recArr[4]) + 9\n if (chrom, pos) in mmrfC.index:\n cl = \"genomic_exact\"\n freq = str(mmrfC.loc[(chrom,pos)])\n medVAF = str(mmrfM.loc[(chrom,pos)])\n Q25 = str(mmrfQ25.loc[(chrom,pos)])\n Q75 = 
str(mmrfQ75.loc[(chrom,pos)])\n positions = str(pos)\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n flag = 1\n if flag == 0:\n mmrfCsub=mmrfC.loc[chrom]\n if not mmrfCsub[(mmrfCsub.index >= start) & (mmrfCsub.index <= end)].empty:\n for i in mmrfCsub[(mmrfCsub.index >= start) & (mmrfCsub.index <= end)].index.values:\n cl = \"genomic_close\"\n freq.append(str(mmrfC.loc[(chrom,i)]))\n medVAF.append(str(mmrfM.loc[(chrom,i)]))\n Q25.append(str(mmrfQ25.loc[(chrom,i)]))\n Q75.append(str(mmrfQ75.loc[(chrom,i)]))\n positions.append(str(i))\n freq = (\":\".join(freq))\n medVAF = (\":\".join(medVAF))\n Q25 = (\":\".join(Q25))\n Q75 = (\":\".join(Q75))\n positions = (\":\".join(positions))\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n else:\n cl = \"NA\"\n freq = \"NA\"\n medVAF = \"NA\"\n Q25 = \"NA\"\n Q75 = \"NA\"\n positions = \"NA\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n\n\n except:\n cl = \"NA\"\n freq = \"NA\"\n medVAF = \"NA\"\n Q25 = \"NA\"\n Q75 = \"NA\"\n positions = \"NA\"\n record = [ record, cl, freq, medVAF, Q25, Q75, positions ]\n record = (\"\\t\".join(record))\n\n\n\n try:\n chrom = str(recArr[3])\n pos = int(recArr[4])\n start = int(recArr[4]) - 9\n end = int(recArr[4]) + 9\n if (chrom, pos) in nicollo_counts.index:\n bolli_cl = \"genomic_exact\"\n bolli_freq = str(nicollo_counts.loc[(chrom,pos)]) \n bolli_positions = str(pos)\n bolli_anno = str(nol_var.loc[chrom, pos]['Variant_class'].values[0])\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n bolli_flag = 1\n\n\n if bolli_flag == 0: \n nicollo_counts_sub=nicollo_counts.loc[chrom]\n if not nicollo_counts_sub[(nicollo_counts_sub.index >= start) & (nicollo_counts_sub.index <= end)].empty:\n for i in nicollo_counts_sub[(nicollo_counts_sub.index >= start) & (nicollo_counts_sub.index <= end)].index.values:\n #if not nicollo_counts_sub.ix[start:end].empty:\n # for i in nicollo_counts_sub.ix[start:end].index.values:\n #print(\"XXXXXXX\",i, nicollo_counts_sub.loc[(chrom,i)], start, end)\n bolli_cl = \"genomic_close\"\n bolli_freq.append(str(nicollo_counts.loc[(chrom,i)]))\n bolli_anno.append(str(nol_var.loc[(chrom,i)]['Variant_class'].values[0]))\n bolli_positions.append(str(i))\n bolli_freq = (\":\".join(bolli_freq))\n bolli_positions = (\":\".join(bolli_positions))\n bolli_anno = (\":\".join(bolli_anno))\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n else:\n bolli_cl = \"NA\"\n bolli_freq = \"NA\"\n bolli_positions = \"NA\"\n bolli_anno = \"NA\"\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n\n\n except:\n bolli_cl = \"NA\"\n bolli_freq = \"NA\"\n bolli_anno = \"NA\"\n bolli_positions = \"NA\"\n record = [ record, bolli_cl, bolli_freq, bolli_anno, bolli_positions ]\n record = (\"\\t\".join(record))\n\n\n normal = \"0\"\n normalVAF = \"0\"\n try:\n chrom=str(recArr[3])\n pos=int(recArr[4])\n normal = counts_tsv.loc[(chrom,pos),\"count\"]\n normal = normal.ix[0]\n normal = str(normal)\n\n normalVAF = str(counts_median.loc[(chrom,pos)])\n\n record = [ record, normal, normalVAF ]\n record = (\"\\t\".join(record))\n print(record)\n\n except:\n normal = \"0\"\n normalVAF = \"0\"\n record = [ record, str(normal), str(normalVAF) ]\n record = (\"\\t\".join(record))\n print(record)", "def 
format_used_features(model_dir):\n feature_keys = {\n \"indel_complexity\": \"ICP\",\n \"dissimilarity\": \"DSM\",\n \"indel_size\": \"ISZ\",\n \"repeat\": \"REP\",\n \"is_uniq_mapped\": \"UQM\",\n \"is_near_boundary\": \"NEB\",\n \"equivalence_exists\": \"EQX\",\n \"is_bidirectional\": \"BID\",\n \"is_multiallelic\": \"MTA\",\n \"is_inframe\": \"FRM\",\n \"is_splice\": \"SPL\",\n \"is_truncating\": \"TRN\",\n \"is_in_cdd\": \"CDD\",\n \"indel_location\": \"LOC\",\n \"is_nmd_insensitive\": \"NMD\",\n \"ipg\": \"IPG\",\n \"cds_length\": \"LEN\",\n \"lc\": \"LC\",\n \"local_lc\": \"LLC\",\n \"gc\": \"GC\",\n \"local_gc\": \"LGC\",\n \"strength\": \"SG\",\n \"local_strength\": \"LSG\",\n \"is_ins\": \"INS\",\n \"is_at_ins\": \"ATI\",\n \"is_at_del\": \"ATD\",\n \"is_gc_ins\": \"GCI\",\n \"is_gc_del\": \"GCD\",\n \"ref_count\": \"REFC\",\n \"alt_count\": \"ALTC\",\n \"is_on_db\": \"SNP\",\n }\n\n feature_dict = make_feature_dict(model_dir)\n\n features_used_for_sni = [\n feature_keys[f] for f in feature_dict[\"single_nucleotide_indels\"]\n ]\n features_used_for_mni = [\n feature_keys[f] for f in feature_dict[\"multi_nucleotide_indels\"]\n ]\n features_used_for_sni.sort()\n features_used_for_mni.sort()\n\n d = {}\n d[\"##features_used_for_single_nucleotide_indels\"] = \";\".join(features_used_for_sni)\n d[\"##features_used_for_multi_nucleotide_indels\"] = \";\".join(features_used_for_mni)\n\n return d", "def transpose_labels(df, sort=False):\n label = 'gtruth'\n temp_gtruth = df[df['label'] == 'gtruth']\n for sample_method_to_test in ['lab', 'pier']:\n temp_gtruth = temp_gtruth \\\n .rename({\n f'{sample_method_to_test} total abundance': f'{sample_method_to_test} {label} total abundance',\n f'{sample_method_to_test} raw count': f'{sample_method_to_test} {label} raw count',\n f'{sample_method_to_test} nrmlzd raw count': f'{sample_method_to_test} {label} nrmlzd raw count',\n f'{sample_method_to_test} relative abundance': f'{sample_method_to_test} {label} relative abundance',\n f'{sample_method_to_test} cells/mL': f'{sample_method_to_test} {label} cells/mL'},\n axis=1)\n\n temp_gtruth = temp_gtruth.drop('label', axis=1)\n\n label = 'predicted'\n temp_pred = df[df['label'] == 'prediction']\n for sample_method_to_test in ['lab', 'pier']:\n temp_pred = temp_pred \\\n .rename({\n f'{sample_method_to_test} total abundance': f'{sample_method_to_test} {label} total abundance',\n f'{sample_method_to_test} raw count': f'{sample_method_to_test} {label} raw count',\n f'{sample_method_to_test} nrmlzd raw count': f'{sample_method_to_test} {label} nrmlzd raw count',\n f'{sample_method_to_test} relative abundance': f'{sample_method_to_test} {label} relative abundance',\n f'{sample_method_to_test} cells/mL': f'{sample_method_to_test} {label} cells/mL'},\n axis=1)\n temp_pred = temp_pred.drop('label', axis=1)\n\n merge_col = ['class', 'datetime', 'sampling time']\n micro_col = [col for col in df.columns if col.startswith('micro')]\n if all(mc in df.columns for mc in micro_col):\n merge_col += micro_col\n\n concat = temp_pred.merge(temp_gtruth, on=merge_col)\n\n # sort dataframe\n if sort:\n col = sorted(concat.columns)\n concat = concat[col[:2] + [col[-1]] + col[2:-1]]\n\n return concat", "def convert_to_table_format(package):\n tables = list()\n for primary_table_id in package.primary_table_ids:\n tables.append(StachExtensions.__generate_table(package, primary_table_id))\n return tables", "def collate_samples(tool_name, fields, samples):\n sample_dict = {}\n for sample in samples:\n sample_name = sample['name']\n 
sample_dict[sample_name] = {}\n tool_result = sample[tool_name]\n for field in fields:\n sample_dict[sample_name][field] = tool_result[field]\n\n return sample_dict", "def data_prep(df, params, if_resample=False):\n\n if if_resample and (params['balanced'] in ['Bootstrap', 'Handsample']):\n if params['balanced'] == 'Bootstrap':\n df = resample(df=df, balance=params['balanced'], nclass=params['classnum'])\n elif params['balanced'] == 'Handsample':\n df = resample(df=df, balance=params['balanced'], nclass=params['classnum'])\n\n if params['classnum'] == 6:\n df.drop(df[df['label']=='PTSD'].index, axis=0, inplace=True)\n\n data = list(df.dialog)\n label_encode = LabelEncoder()\n output = dict()\n output['data'] = data\n output['encoded_label'] = label_encode.fit_transform(df.label)\n output['binary_label'] = label_binarize(y=output['encoded_label'], classes=np.arange(params['classnum']))\n return output, label_encode", "def get_results(self):\n d = {}\n# r = {}\n for analyser in self.xml_tree.getroot():\n for child in analyser:\n if child.tag == 'all-records':\n for record in child:\n attributes = record.attrib\n sample = attributes['sampleId']\n assay_id = attributes['assayId']\n genotype = attributes['genotypeId']\n quality = attributes['description'].split('.')[0]\n if re.match(r'rs\\d+', assay_id):\n if sample in d:\n if assay_id in d[sample]:\n for allele in list(genotype):\n if allele not in d[sample][assay_id]['genotype']:\n d[sample][assay_id]['genotype'] += allele\n if quality not in d[sample][assay_id]['quality']:\n d[sample][assay_id]['quality'].append(quality)\n else:\n d[sample][assay_id] = {'genotype': genotype, 'quality': [quality]}\n else:\n d[sample] = {assay_id: {'genotype': genotype, 'quality': [quality]}}\n# if sample in r:\n# if assay_id in r[sample]:\n# for allele in list(genotype):\n# if allele not in r[sample][assay_id]:\n# r[sample][assay_id] += allele\n# else:\n# r[sample][assay_id] = genotype\n# else:\n# r[sample] = {assay_id: genotype}\n# for k, v in r.items():\n# for k1, v1, in v.items():\n# if len(v1) == 1:\n# v[k1] += v1\n# pprint.pprint(r)\n# df = pd.DataFrame.from_dict(r).transpose()\n# print(df)\n# df.to_excel('snpcheck.xlsx')\n return d", "def _convert_tracks_to_tabular_format(tracks: List[Track]) -> pd.DataFrame:\n track_dfs: List[pd.DataFrame] = []\n\n for track in tracks:\n track_df = pd.DataFrame()\n\n observed_states: List[bool] = []\n timesteps: List[int] = []\n positions_x: List[float] = []\n positions_y: List[float] = []\n headings: List[float] = []\n velocities_x: List[float] = []\n velocities_y: List[float] = []\n\n for object_state in track.object_states:\n observed_states.append(object_state.observed)\n timesteps.append(object_state.timestep)\n positions_x.append(object_state.position[0])\n positions_y.append(object_state.position[1])\n headings.append(object_state.heading)\n velocities_x.append(object_state.velocity[0])\n velocities_y.append(object_state.velocity[1])\n\n track_df[\"observed\"] = observed_states\n track_df[\"track_id\"] = track.track_id\n track_df[\"object_type\"] = track.object_type.value\n track_df[\"object_category\"] = track.category.value\n track_df[\"timestep\"] = timesteps\n track_df[\"position_x\"] = positions_x\n track_df[\"position_y\"] = positions_y\n track_df[\"heading\"] = headings\n track_df[\"velocity_x\"] = velocities_x\n track_df[\"velocity_y\"] = velocities_y\n\n track_dfs.append(track_df)\n\n return pd.concat(track_dfs, ignore_index=True)", "def encoding_labelcount(df, target=None):\n if not target:\n target = 
['user_id', 'title']\n\n norm = round(\n df.shape[0] / 10000) # normalize the count by /per 100000 entries\n for col in target:\n df[col + '_labelcount'] = df[col].map(df[col].value_counts()) / norm\n df.drop([col], axis=1, inplace=True)\n return None", "def _prepare_summary_table(self, raw_stats_paths, descr_paths):\n\n sum_tbl = OrderedDict()\n sum_tbl[\"Title\"] = OrderedDict()\n for res in self.rsts:\n sum_tbl[res.reportid] = OrderedDict()\n\n # Add tool information.\n key = \"tool_info\"\n sum_tbl[\"Title\"][key] = \"Data collection tool\"\n for res in self.rsts:\n sum_tbl[res.reportid][key] = f\"{res.info['toolname'].capitalize()} version \" \\\n f\"{res.info['toolver']}\"\n\n # Add datapoint counts.\n key = \"datapoints_cnt\"\n sum_tbl[\"Title\"][key] = \"Datapoints Count\"\n for res in self.rsts:\n sum_tbl[res.reportid][key] = len(res.df.index)\n\n # Add measurement resolution.\n for res in self.rsts:\n key = \"device_resolution\"\n resolution = res.info.get(\"resolution\")\n if resolution:\n sum_tbl[\"Title\"][key] = \"Device Resolution\"\n sum_tbl[res.reportid][key] = f\"{resolution}ns\"\n\n # Add links to the raw statistics directories.\n if raw_stats_paths:\n key = \"raw_stats\"\n sum_tbl[\"Title\"][key] = \"Raw statistics\"\n for res in self.rsts:\n path = raw_stats_paths.get(res.reportid, \"Not available\")\n sum_tbl[res.reportid][key] = path\n\n # Add links to the descriptions.\n if descr_paths:\n key = \"descr\"\n sum_tbl[\"Title\"][key] = \"Test description\"\n for res in self.rsts:\n path = descr_paths.get(res.reportid, \"Not available\")\n sum_tbl[res.reportid][key] = path\n\n return sum_tbl", "def tabulate(store: ObservationStore) -> \\\n Generator[LabelledObservation, None, None]:\n for k in store:\n for ob in store[k]:\n yield when(ob), measured(ob), k", "def flatten(counts):\n single_names = {}\n long_names = {}\n for i in range(len(counts.items())):\n if(len(counts.items()[i][0].split(\" \")) <= 1):\n single_names[str(counts.items()[i][0])] = counts.items()[i][1]\n else:\n long_names[str(counts.items()[i][0])] = counts.items()[i][1]\n \n starter_list = [[[x[0]],x[1]] for x in long_names.items()]\n for i in range(len(single_names.items())):\n matched = False\n for j in range(len(starter_list)):\n if(single_names.items()[i][0] in starter_list[j][0][0].split(\" \")):\n starter_list[j][0].append(single_names.items()[i][0])\n starter_list[j][1] += single_names.items()[i][1]\n matched = True\n break\n \n if(matched == False):\n starter_list.append([[single_names.items()[i][0]], single_names.items()[i][1]]) \n \n \n return starter_list", "def test_format_unifrac_sample_mapping(self):\r\n a = [[1, 0, 0], [0, 2, 4], [7, 0, 9.0]]\r\n otu_ids = ['OTUa', 'OTUb', 'OTUc']\r\n sample_ids = ['Sa', 'Sb', 'Sc']\r\n result = format_unifrac_sample_mapping(sample_ids, otu_ids, a)\r\n self.assertEqual(\r\n result,\r\n ['OTUa\\tSa\\t1',\r\n 'OTUb\\tSb\\t2',\r\n 'OTUb\\tSc\\t4',\r\n 'OTUc\\tSa\\t7',\r\n 'OTUc\\tSc\\t9.0'])", "def prepare_participants_for_metadata_export(path_to_samples_info, tsca_id): \n raw = pd.read_table(path_to_samples_info)\n print( \"%d Participants in this batch\" % raw['individual_id'].unique().shape[0] )\n # Data to upload\n data = pd.DataFrame(raw.individual_id.drop_duplicates()).rename(columns={'individual_id':'entity:participant_id'})\n return data", "def collate_fn_pad_text_only(batch):\n output = {\n 'id': [],\n 'label': {\n 'intent': [],\n 'semiotic': [],\n 'contextual': [],\n },\n 'caption': [],\n }\n\n for sample in batch:\n 
output['id'].append(sample['id'])\n output['label']['intent'].append(sample['label']['intent'])\n output['label']['semiotic'].append(sample['label']['semiotic'])\n output['label']['contextual'].append(sample['label']['contextual'])\n output['caption'].append(sample['caption'])\n\n output['label']['intent'] = torch.LongTensor(output['label']['intent'])\n output['label']['semiotic'] = torch.LongTensor(output['label']['semiotic'])\n output['label']['contextual'] = torch.LongTensor(output['label']['contextual'])\n output['caption'] = torch.nn.utils.rnn.pad_sequence(output['caption']).t() # (batch_size, sequence_length)\n return output", "def format_bgc_metadata(df,float_id):\n mdf = df[bgc_metadata_columns]\n bgc_metadata_dict = {}\n for col in list(mdf):\n bgc_metadata_dict[col] = list(pd.Series(mdf[col].unique()).astype(str).str.strip())\n bgc_metadata_dict[col] = list(pd.Series(mdf[col].unique()).astype(str).str.strip().replace(\"'\",'\"'))\n bgc_metadata_dict = json.dumps(bgc_metadata_dict) \n bgc_metadata_df = pd.DataFrame({\"float_id\": [float_id], \"Metadata_Dict\": [bgc_metadata_dict]})\n return bgc_metadata_df", "def count_variants(vcf_list, sample_list):\n\n df_lst = []\n\n sample_vcf_dct = dict(zip(sample_list,vcf_list))\n\n for s in sample_vcf_dct.keys():\n\n vcf_in = sample_vcf_dct[s]\n vcf = VariantFile(vcf_in)\n\n snv = 0\n indel = 0\n\n for rec in vcf:\n\n ref_len = len(rec.ref)\n\n for a in rec.alts:\n if len(a) > 1 or ref_len > 1:\n indel +=1\n else:\n snv +=1\n\n df_lst.append([s,snv,indel])\n\n out_df = pd.DataFrame(df_lst, columns=['sample','snvs','indels'])\n\n return out_df", "def _compute_columns(log: EventLog, prefix_length: int, padding: bool) -> list:\n return [\"trace_id\"] + \\\n sorted(list({\n event['concept:name']\n for trace in log\n for event in trace[:prefix_length]\n })) + \\\n ['0'] if padding else [] + \\\n ['label']", "def feature_table(chr_id, source, orient, genes, transcripts, cds, exons, unk):\n for gname, ginfo in genes.items():\n line = [str(chr_id), \n 'gbk_to_gff',\n ginfo[3],\n str(ginfo[0]),\n str(ginfo[1]),\n '.',\n ginfo[2],\n '.',\n 'ID='+str(gname)+';Name='+str(gname)+';Note='+ginfo[-1]]\n print '\\t'.join(line) \n ## construct the transcript line is not defined in the original file \n t_line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', ginfo[2], '.'] \n\n if not transcripts:\n t_line.append('ID=Transcript:'+str(gname)+';Parent='+str(gname))\n\n if exons: ## get the entire transcript region from the defined feature\n t_line[3] = str(exons[gname][0][0])\n t_line[4] = str(exons[gname][0][-1])\n elif cds:\n t_line[3] = str(cds[gname][0][0])\n t_line[4] = str(cds[gname][0][-1])\n print '\\t'.join(t_line) \n\n if exons:\n exon_line_print(t_line, exons[gname], 'Transcript:'+str(gname), 'exon')\n\n if cds:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'exon')\n\n else: ## transcript is defined \n for idx in transcripts[gname]: \n t_line[2] = idx[3]\n t_line[3] = str(idx[0])\n t_line[4] = str(idx[1])\n t_line.append('ID='+str(idx[2])+';Parent='+str(gname))\n print '\\t'.join(t_line) \n \n ## feature line print call \n if exons:\n exon_line_print(t_line, exons[gname], str(idx[2]), 'exon')\n if cds:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'exon')\n\n if len(genes) == 0: ## feature entry with fragment information \n \n line = [str(chr_id), 'gbk_to_gff', source, 
0, 1, '.', orient, '.'] \n fStart = fStop = None \n\n for eid, ex in cds.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n for eid, ex in exons.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n if fStart or fStart:\n\n line[2] = 'gene'\n line[3] = str(fStart)\n line[4] = str(fStop)\n line.append('ID=Unknown_Gene_' + str(unk) + ';Name=Unknown_Gene_' + str(unk))\n print \"\\t\".join(line)\n\n if not cds:\n line[2] = 'transcript'\n else:\n line[2] = 'mRNA'\n line[8] = 'ID=Unknown_Transcript_' + str(unk) + ';Parent=Unknown_Gene_' + str(unk)\n print \"\\t\".join(line)\n \n if exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n if cds:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'CDS')\n if not exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n unk +=1 \n\n return unk", "def usage_table_format(result):\n table = []\n for item in result:\n row = OrderedDict()\n row['Value'] = item['name']['localizedValue']\n row['Usage'] = item['currentValue'] or \"0\"\n row['Limit'] = item['limit'] or \"0\"\n table.append(row)\n return table", "def distinct_cols_multi_table(table_list=[], remove_columns=['PNR_LOC','PNR_CRT_DTE'], output_format='sql'):\n alfa = \"abcdefghijklmnopqrstuvwxyz\"\n \n all_cols=set()\n all_cols_str=''\n \n for i, table in enumerate(table_list):\n cols=table.columns\n\n #find new cols (cols in B but not in A)\n different_cols = set(table.columns)-all_cols\n\n #remove additional removal cols\n different_cols = different_cols-set(remove_columns)\n\n #append table references\n if output_format == 'sql':\n ref_cols = ',{0}.'.format(alfa[i])+', {0}.'.join(different_cols).format(alfa[i])\n #append to full column list & str\n all_cols = all_cols.union(different_cols)\n all_cols_str = all_cols_str +\"\"+ ref_cols\n \n elif output_format=='pyspark':\n ref_cols = ','.join('{1}.{0}'.format(w,alfa[i]) for w in different_cols)\n #append to full column list & str\n all_cols = all_cols.union(different_cols)\n all_cols_str = all_cols_str +\",\"+ ref_cols\n else:\n print(\"please select output_format = ['pyspark','sql']\")\n \n\n # remove first comma\n if output_format == 'sql':\n return all_cols_str[1:]\n elif output_format=='pyspark':\n return all_cols_str[1:].split(',')", "def legacy_reporter(self):\n logging.info('Creating database-friendly summary report')\n header = '{}\\n'.format(','.join(self.legacy_headers))\n # Create a string to store all the results\n data = str()\n for sample in self.metadata:\n # Add the value of the appropriate attribute to the results string\n data += GenObject.returnattr(sample, 'name')\n # SampleName\n data += GenObject.returnattr(sample.run, 'SamplePlate')\n # Genus\n data += GenObject.returnattr(sample.general, 'closestrefseqgenus')\n # SequencingDate\n data += GenObject.returnattr(sample.run, 'Date')\n # Analyst\n data += GenObject.returnattr(sample.run, 'InvestigatorName')\n # Legacy ConFindr clean/contaminated call\n data += 'ND,'\n # N50\n n50 = GenObject.returnattr(sample.quast, 'N50',\n number=True)\n if n50 != '-,':\n data += n50\n else:\n data += '0,'\n # NumContigs\n data += GenObject.returnattr(sample.quast, 'num_contigs',\n number=True)\n # TotalLength\n data += GenObject.returnattr(sample.quast, 'Total_length',\n number=True)\n # MeanInsertSize\n data += GenObject.returnattr(sample.quast, 'mean_insert',\n number=True)\n # InsertSizeSTD\n data += GenObject.returnattr(sample.quast, 'std_insert',\n number=True)\n # AverageCoverageDepth\n data += 
GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',\n number=True)\n # CoverageDepthSTD\n data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',\n number=True)\n # PercentGC\n data += GenObject.returnattr(sample.quast, 'GC',\n number=True)\n # MASH_ReferenceGenome\n data += GenObject.returnattr(sample.mash, 'closestrefseq')\n # MASH_NumMatchingHashes\n data += GenObject.returnattr(sample.mash, 'nummatches')\n # 16S_result\n data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')\n # 16S PercentID\n data += GenObject.returnattr(sample.sixteens_full, 'percent_id')\n # rMLST_Result\n try:\n # If the number of matches to the closest reference profile is 53, return the profile number\n if sample.rmlst.matches == 53:\n if type(sample.rmlst.sequencetype) is list:\n rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','\n else:\n rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')\n rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'\n data += rmlst_seq_type\n else:\n # Otherwise the profile is set to new\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_Result\n try:\n if sample.mlst.matches == 7:\n if type(sample.mlst.sequencetype) is list:\n mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','\n else:\n mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')\n mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'\n data += mlst_seq_type\n else:\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_gene_X_alleles\n try:\n # Create a set of all the genes present in the results (gene name split from allele)\n gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}\n for gene in sorted(gene_set):\n allele_list = list()\n # Determine all the alleles that are present for each gene\n for allele in sample.mlst.combined_metadata_results:\n if gene in allele:\n allele_list.append(allele.replace(' ', '_'))\n # If there is more than one allele in the sample, add both to the string separated by a ';'\n if len(allele_list) > 1:\n data += '{},'.format(';'.join(allele_list))\n # Otherwise add the only allele\n else:\n data += allele_list[0] + ','\n # If there are fewer than seven matching alleles, add a ND for each missing result\n if len(gene_set) < 7:\n data += (7 - len(gene_set)) * 'ND,'\n except AttributeError:\n # data += '-,-,-,-,-,-,-,'\n data += 'ND,ND,ND,ND,ND,ND,ND,'\n # CoreGenesPresent\n data += GenObject.returnattr(sample.gdcs, 'coreresults')\n # E_coli_Serotype\n try:\n # If no O-type was found, set the output to be O-untypeable\n if ';'.join(sample.ectyper.o_type) == '-':\n otype = 'O-untypeable'\n else:\n otype = sample.ectyper.o_type\n # Same as above for the H-type\n if ';'.join(sample.ectyper.h_type) == '-':\n htype = 'H-untypeable'\n\n else:\n htype = sample.ectyper.h_type\n serotype = '{otype}:{htype},'.format(otype=otype,\n htype=htype)\n # Add the serotype to the data string unless neither O-type not H-type were found; add ND instead\n data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'\n except AttributeError:\n data += 'ND,'\n # SISTR_serovar_antigen\n data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')\n # SISTR_serovar_cgMLST\n data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')\n # SISTR_serogroup\n data += GenObject.returnattr(sample.sistr, 'serogroup')\n # SISTR_h1\n data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')\n # 
SISTR_h2\n data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')\n # SISTR_serovar\n data += GenObject.returnattr(sample.sistr, 'serovar')\n # GeneSeekr_Profile\n try:\n if sample.genesippr.report_output:\n data += ';'.join(sample.genesippr.report_output) + ','\n else:\n data += 'ND,'\n except AttributeError:\n data += 'ND,'\n # Vtyper_Profile\n data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')\n # AMR_Profile and resistant/sensitive status\n if sample.resfinder_assembled.pipelineresults:\n # Profile\n for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):\n data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),\n r_set=';'.join(sorted(list(resistance_set))))\n data += ','\n # Resistant/Sensitive\n data += 'Resistant,'\n else:\n # Profile\n data += 'ND,'\n # Resistant/Sensitive\n data += 'Sensitive,'\n # Plasmid Result'\n if sample.mobrecon.pipelineresults:\n for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):\n data += '{plasmid}({details});'.format(plasmid=plasmid,\n details=details)\n data += ','\n else:\n data += 'ND,'\n # TotalPredictedGenes\n data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',\n number=True)\n # PredictedGenesOver3000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',\n number=True)\n # PredictedGenesOver1000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',\n number=True)\n # PredictedGenesOver500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',\n number=True)\n # PredictedGenesUnder500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',\n number=True)\n # NumClustersPF\n data += GenObject.returnattr(sample.run, 'NumberofClustersPF')\n # Percentage of reads mapping to PhiX control\n data += GenObject.returnattr(sample.run, 'phix_aligned')\n # Error rate calculated from PhiX control\n data += GenObject.returnattr(sample.run, 'error_rate')\n # LengthForwardRead\n data += GenObject.returnattr(sample.run, 'forwardlength',\n number=True)\n # LengthReverseRead\n data += GenObject.returnattr(sample.run, 'reverselength',\n number=True)\n # Real time strain\n data += GenObject.returnattr(sample.run, 'Description')\n # Flowcell\n data += GenObject.returnattr(sample.run, 'flowcell')\n # MachineName\n data += GenObject.returnattr(sample.run, 'instrument')\n # PipelineVersion\n data += self.commit + ','\n # AssemblyDate\n data += datetime.now().strftime('%Y-%m-%d') + ','\n # SamplePurity\n data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')\n # cgMLST\n try:\n if type(sample.cgmlst.sequencetype) is list:\n if sample.cgmlst.sequencetype:\n cgmlst_seq_type = ';'.join(sorted(sample.cgmlst.sequencetype)).rstrip(';') + ','\n else:\n cgmlst_seq_type = 'ND,'\n else:\n cgmlst_seq_type = GenObject.returnattr(sample.cgmlst, 'sequencetype')\n # cgmlst_seq_type = cgmlst_seq_type if cgmlst_seq_type != 'ND,' else 'new,'\n data += cgmlst_seq_type\n except AttributeError:\n data += 'ND,'\n # Name of the database used in the analyses\n data += os.path.split(self.reffilepath)[-1] + ','\n # Database download date\n data += self.download_date\n # Append a new line to the end of the results for this sample\n data += '\\n'\n # Replace any NA values with ND\n cleandata = data.replace('NA', 'ND')\n with open(os.path.join(self.reportpath, 'legacy_combinedMetadata.csv'), 'w') as metadatareport:\n metadatareport.write(header)\n metadatareport.write(cleandata)", 
"def generate_aggregates(self) -> None:\n self.create_count_map()\n self.create_total_count()\n self.create_n_1_gram_map()", "def count_frequency(df, count_columns: list, group_columns=['Fabric_name', 'Fabric_label'], margin_column_row:tuple=None):\n\n if margin_column_row and len(margin_column_row) == 2:\n if all([isinstance(element, bool) for element in margin_column_row]):\n # margin_column_row = ((False, False),) * len(count_columns)\n margin_column_row = (margin_column_row, ) * len(count_columns)\n\n # by default keep summary row but remove summary column\n if not margin_column_row:\n margin_column_row = ((False, True),) * len(count_columns)\n if len(count_columns) != len(margin_column_row):\n print('\\n')\n print('Parameters count_columns and margin_column_row in count_frequency function have different length')\n exit()\n\n index_lst = [df[column] for column in group_columns if column in df.columns and df[column].notna().any()]\n frequency_df = pd.DataFrame()\n\n for column, (margin_column, margin_row) in zip(count_columns, margin_column_row):\n if column in df.columns and df[column].notna().any():\n df[column].fillna(np.nan, inplace=True)\n current_df = pd.crosstab(index=index_lst, columns=df[column], margins=any((margin_column, margin_row)))\n current_df = current_df.sort_index()\n if any((margin_column, margin_row)):\n # drop column All\n if not margin_column:\n current_df.drop(columns=['All'], inplace=True)\n # drop row All\n if not margin_row:\n current_df.drop(index=['All'], inplace=True)\n if frequency_df.empty:\n frequency_df = current_df.copy()\n else:\n frequency_df = frequency_df.merge(current_df, how='outer', on=group_columns)\n\n frequency_df.fillna(0, inplace=True) \n frequency_df.reset_index(inplace=True) \n return frequency_df", "def count_frequency(df, count_columns: list, group_columns=['Fabric_name', 'Fabric_label'], margin_column_row:tuple=None):\n\n if margin_column_row and len(margin_column_row) == 2:\n if all([isinstance(element, bool) for element in margin_column_row]):\n # margin_column_row = ((False, False),) * len(count_columns)\n margin_column_row = (margin_column_row, ) * len(count_columns)\n\n # by default keep summary row but remove summary column\n if not margin_column_row:\n margin_column_row = ((False, True),) * len(count_columns)\n if len(count_columns) != len(margin_column_row):\n print('\\n')\n print('Parameters count_columns and margin_column_row in count_frequency function have different length')\n exit()\n\n index_lst = [df[column] for column in group_columns if column in df.columns and df[column].notna().any()]\n frequency_df = pd.DataFrame()\n\n for column, (margin_column, margin_row) in zip(count_columns, margin_column_row):\n if column in df.columns and df[column].notna().any():\n df[column].fillna(np.nan, inplace=True)\n current_df = pd.crosstab(index=index_lst, columns=df[column], margins=any((margin_column, margin_row)))\n current_df = current_df.sort_index()\n if any((margin_column, margin_row)):\n # drop column All\n if not margin_column:\n current_df.drop(columns=['All'], inplace=True)\n # drop row All\n if not margin_row:\n current_df.drop(index=['All'], inplace=True)\n if frequency_df.empty:\n frequency_df = current_df.copy()\n else:\n frequency_df = frequency_df.merge(current_df, how='outer', on=group_columns)\n\n frequency_df.fillna(0, inplace=True) \n frequency_df.reset_index(inplace=True) \n return frequency_df", "def summary_scores(fold_scores_list): \n d = {}\n nulls = {}\n for idx, score in enumerate(fold_scores_list):\n 
d.update({f\"{idx+1}0 samples:\": score[0].T})\n nulls.update({f\"{idx+1}0 samples:\": score[1].T})\n \n summary_scores = pd.concat(d.values(), axis=1, keys=d.keys()).T\n summary_nulls = pd.concat(nulls.values(), axis=1, keys=nulls.keys()).T\n return summary_scores, summary_nulls", "def table_gen(NamesL_pairs, p_pL, m_mL, p_mL, m_pL, p_valsL, p_vals_BonferoniL, RatiosL, p_valsL_divergent_convergent,\n p_valsL_divergent_convergent_BonferoniL, RatiosL_divergent_convergent, output_table):\n datafile = open(output_table, \"w\")\n datafile.write(\n \"Feature_1\" + '\\t' + \"Feature_2\" + \"\\t\" + \"plus_plus\" + '\\t' + \"minus_minus\" + '\\t' + \"plus_minus\" + '\\t' + \"minus_plus\" + '\\t' + \"p_value_same_opposite\" + '\\t' + \"p-value_same_opposite_Bonferoni_corrected\" + '\\t' + \"Ratio_same_opposite\" + '\\t' + \"p_value_divergent_convergent\" + '\\t' + \"p_value_divergent_convergent Bonferoni corrected\" + '\\t' + \"Ratio divergent convergent\" + '\\n')\n for i in range(len(NamesL_pairs)):\n datafile.write(\n NamesL_pairs[i][0] + '\\t' + NamesL_pairs[i][1] + '\\t' + str(p_pL[i]) + '\\t' + str(m_mL[i]) + '\\t' + str(\n p_mL[i]) + '\\t' + str(m_pL[i]) + '\\t' + str(p_valsL[i]) + '\\t' + str(p_vals_BonferoniL[i]) + '\\t' + str(\n RatiosL[i]) + '\\t' + str(p_valsL_divergent_convergent[i]) + '\\t' + str(\n p_valsL_divergent_convergent_BonferoniL[i]) + '\\t' + str(RatiosL_divergent_convergent[i]) + '\\n')\n datafile.close()\n return", "def collate_fn(data):\n\toutput = dict()\n\n\tfor name in ['answer_ID','query_ID']:\n\t\toutput[name] = [ _[name] for _ in data]\n\n\n\tfor name in ['query_len','answer_len']:\n\t\ttemp = [ _[name] for _ in data]\t \n\t\toutput[name] = torch.stack(temp, dim=0) \n\t\n\t#deal with source and target\n\tfor name in ['answer','query']:\n\t\tlength = output['{0}_len'.format(name)]\n\t\tl = length.max().item()\n\n\t\tfor i in range(len(data)):\n\t\t\tif(l-length[i].item()>0):\n\t\t\t\tdata[i][name] = torch.cat([data[i][name],torch.zeros(l-length[i].item(),dtype=torch.long)],dim=-1)\n\n\t\ttemp = [ _[name] for _ in data]\n\t\t\n\t\toutput[name] = torch.stack(temp, dim=0).long()\n\t\t\n\n\treturn output", "def reduce_data_to_necessary_columns(filtered_df):\n hist_df = filtered_df[\n [\n \"UniqueName\",\n \"Joins\",\n \"Projection_Attributes\",\n \"Selection_Attributes\",\n \"GroupBy\",\n \"OrderBy\",\n \"Strings\",\n \"Tables\",\n ]\n ].set_index(\"UniqueName\")\n return hist_df", "def buildAdsTable_v1(output_file = None):\r\n ads_table = []\r\n text_props = ['readability_text', '_all']\r\n onto_props_with_mapping = {'phone':['telephone.name', 'telephone.name.raw'], 'email': ['email.name', 'email.name.raw'],\r\n 'posting_date':['inferlink_date', 'readability_date', 'high_recall_readability_date'],\r\n 'price':['price'], 'location':['addressLocality'],\r\n 'name':['name'],\r\n 'ethnicity':['ethnicity'],\r\n 'eye_color':['eyeColor'], 'title':['title'],\r\n 'hair_color':['hairColor'], 'nationality':['nationality'],\r\n 'business_type':['business_type'],\r\n 'business_name':['streetAddress'], 'services':['serviceType'],\r\n 'business': ['streetAddress'],\r\n 'physical_address': ['streetAddress'],\r\n 'gender':['gender'], 'top_level_domain':['top_level_domain'],\r\n 'obfuscation':['telephone.isObfuscated', 'email.isObfuscated'],\r\n 'age':['age'], 'hyperlink:':['relatedLink'], 'drug_use':['drug_use'],\r\n 'review_site':['review_site'], 'review_id':['review_id'],\r\n 'number_of_individuals':['name_count'],\r\n 'ad': ['identifier'],\r\n 'multiple_phone': 
['telephone_count'],\r\n 'cluster': ['seller.uri'],\r\n 'seed': ['seller.telephone.name', 'seller.email.name']\r\n }\r\n non_readability_props = ['number_of_individuals', 'ad', 'multiple_phone', 'cluster', 'phone', 'posting_date', 'email']\r\n onto_props_without_mapping = ['image_with_email', 'image_with_phone']\r\n for property, value_list in onto_props_with_mapping.iteritems():\r\n dict = {}\r\n dict['onto_prop'] = property\r\n mappings = []\r\n tmp = {}\r\n for v in value_list:\r\n if property == 'phone' or v == 'seller.telephone.name':\r\n tmp[v] = 'build_phone_match_clause'\r\n tmp['_all'] = 'build_phone_match_clause'\r\n tmp['url'] = 'build_phone_regexp_clause'\r\n elif v == 'email.name':\r\n tmp[v] = 'build_email_match_clause'\r\n tmp['_all'] = 'build_match_phrase_clause'\r\n elif property == 'ad':\r\n tmp[v] = 'build_term_clause'\r\n elif '_count' in v:\r\n tmp[v] = 'build_count_match_clause'\r\n elif property == 'gender':\r\n tmp[v] = 'build_gender_match_clause'\r\n elif property == 'posting_date':\r\n tmp[v] = 'build_match_phrase_clause'\r\n else:\r\n tmp[v] = 'build_match_clause'\r\n if property not in non_readability_props:\r\n for v in text_props: # will overwrite for seller.telephone.name\r\n tmp[v] = 'build_match_clause_inner'\r\n mappings.append(tmp)\r\n dict['mappings'] = mappings\r\n ads_table.append(dict)\r\n\r\n for property in onto_props_without_mapping:\r\n dict = {}\r\n dict['onto_prop'] = property\r\n mappings = []\r\n tmp = {}\r\n for v in text_props:\r\n tmp[v] = 'build_match_clause_inner'\r\n mappings.append(tmp)\r\n dict['mappings'] = mappings\r\n ads_table.append(dict)\r\n\r\n if output_file:\r\n file = codecs.open(output_file, 'w', 'utf-8')\r\n for entry in ads_table:\r\n json.dump(entry, file)\r\n file.write('\\n')\r\n file.close()", "def get_summary_of_records(self):\n ids = self.get_saleman_ids()\n table = [\n [\"Seller name\",\"Number of sales\",\"Total Value ($)\"]\n ]\n for id in ids:\n table_id = [self.get_seller_name(id),self.get_number_of_sales(id),\n self.get_total_of_saleman(id)]\n table.append(table_id)\n data_table = AsciiTable(table)\n print(data_table.table)", "def convert_data(test_data,params,list_dict,rational_present=True,topk=2):\n \"\"\"input: params -- input dict, list_dict -- previous predictions containing rationals\n rational_present -- whether to keep rational only or remove them only\n topk -- how many words to select\"\"\"\n \n temp_dict={}\n for ele in list_dict:\n temp_dict[ele['annotation_id']]=ele['rationales'][0]['soft_rationale_predictions']\n \n test_data_modified=[]\n \n for index,row in tqdm(test_data.iterrows(),total=len(test_data)):\n try:\n attention=temp_dict[row['Post_id']]\n except KeyError:\n continue\n topk_indices = sorted(range(len(attention)), key=lambda i: attention[i])[-topk:]\n new_text =[]\n new_attention =[]\n if(rational_present):\n if(params['bert_tokens']):\n new_attention =[0]\n new_text = [101]\n for i in range(len(row['Text'])):\n if(i in topk_indices):\n new_text.append(row['Text'][i])\n new_attention.append(row['Attention'][i])\n if(params['bert_tokens']):\n new_attention.append(0)\n new_text.append(102)\n else:\n for i in range(len(row['Text'])):\n if(i not in topk_indices):\n new_text.append(row['Text'][i])\n new_attention.append(row['Attention'][i])\n test_data_modified.append([row['Post_id'],new_text,new_attention,row['Label']])\n\n df=pd.DataFrame(test_data_modified,columns=test_data.columns)\n return df", "def parse_trflp(lines):\r\n\r\n sample_ids = []\r\n otu_ids = []\r\n data = []\r\n 
non_alphanum_mask = re.compile('[^\\w|^\\t]')\r\n # not sure why the above regex doesn't cover the following regex...\r\n dash_space_mask = re.compile('[_ -]')\r\n\r\n for i, line in enumerate(lines):\r\n elements = line.strip('\\n').split('\\t')\r\n\r\n # special handling for the first line only\r\n if i == 0:\r\n # validating if the file has a header\r\n if elements[0] == '':\r\n for otu_id in elements[1:]:\r\n otu_ids.append(non_alphanum_mask.sub('_', otu_id))\r\n continue\r\n else:\r\n for j, otu_id in enumerate(elements[1:]):\r\n otu_ids.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))\r\n\r\n # handling of all other lines\r\n current_row = []\r\n\r\n # converting each value in the row to int\r\n for count in elements[1:]:\r\n try:\r\n current_row.append(int(round(float(count), 0)))\r\n except ValueError:\r\n current_row.append(0)\r\n\r\n # if the sum of all the values is equial to 0 ignore line\r\n if sum(current_row) == 0:\r\n continue\r\n\r\n # adding sample header to list\r\n sample_ids.append(non_alphanum_mask.sub('.',\r\n dash_space_mask.sub('.', elements[0])))\r\n\r\n # validating the size of the headers to add missing columns\r\n # this is only valid when there is no header\r\n if len(current_row) > len(otu_ids):\r\n # modify header data\r\n extra_cols = []\r\n for j in range(len(otu_ids), len(current_row)):\r\n extra_cols.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))\r\n # modify data\r\n for j in range(len(data)):\r\n data[j].extend([0] * (len(current_row) - len(otu_ids)))\r\n\r\n otu_ids.extend(extra_cols)\r\n elif len(current_row) < len(otu_ids):\r\n # modify data\r\n current_row.extend([0] * (len(otu_ids) - len(current_row)))\r\n\r\n data.append(current_row)\r\n\r\n return sample_ids, otu_ids, asarray(data).transpose()", "def reformat_analogs(analogs, prefix = 'modified-analogfile.tsv'):\n # import numpy as np\n import pandas as pd\n\n try:\n num_cols = 3 # dateAnlg, Dis, Corr\n\n # Create dataframe and read in output csv file of analogs process\n dfS = pd.DataFrame()\n dfS = pd.read_csv(analogs, delimiter=r\"\\s+\", index_col=0)\n\n # Find number of analogues\n num_analogues = (dfS.shape[1]) / 3\n # LOGGER.debug('num_analogues: %s', num_analogues)\n\n # Define temporary df\n df_anlg = dfS.iloc[:, 0:num_analogues] # store only anlg dates\n df_dis = dfS.iloc[:, num_analogues:2 * num_analogues] # store only dis\n df_corr = dfS.iloc[:, 2 * num_analogues:3 *\n num_analogues] # store only corr\n\n # remove index name before stacking\n df_anlg.index.name = \"\"\n df_dis.index.name = \"\"\n df_corr.index.name = \"\"\n\n dateStack = df_anlg.stack()\n disStack = df_dis.stack().abs() # raw values < 0 so take abs\n corrStack = df_corr.stack()\n\n # Create df of correct dimensions (n x num_cols) using dfS\n df_all = dfS.iloc[:, 0:num_cols] # NB data are placeholders\n # Rename cols\n df_all.columns = ['dateAnlg', 'Dis', 'Corr']\n # Replicate each row 20 times (for dcjs format)\n df_all = df_all.loc[np.repeat(df_all.index.values, num_analogues)]\n # Replace data placeholders with correct values\n df_all['dateAnlg'] = list(dateStack)\n df_all['Dis'] = list(disStack)\n df_all['Corr'] = list(corrStack)\n # Name index col\n df_all.index.name = 'dateRef'\n\n # save to tsv file\n analogs_mod = prefix\n df_all.to_csv(analogs_mod, sep='\\t')\n LOGGER.info('successfully reformatted analog file')\n except Exception:\n msg = 'failed to reformat analog file'\n LOGGER.exception(msg)\n raise Exception(msg)\n return analogs_mod", "def data_in(input_filename, sample_id):\n \n print('Processing 
sample {}'.format(sample_id))\n # Encoding set to latin1 due to presence of degree symbol\n # newlines in CC2 logger details fields will cause issues\n header_rows = 30\n df_param = pd.read_table(input_filename, \n nrows=header_rows, \n encoding=\"latin1\", \n header=None)\n df_val = pd.read_table(input_filename, skiprows=header_rows)\n df_param_indexed = df_param.set_index(0)\n# redundant due to parsing sample id from filename\n# sample_id = df_param_indexed.loc['Sample ID', 1]\n \n d1 = {1: pd.Series(['', ''], index=['Sample ID', 'Label'])}\n df_val_params = pd.DataFrame(d1)\n df_val_params.loc['Sample ID', 1] = sample_id\n df_val_params.loc['Label', 1] = sample_id\n\n return df_param_indexed, df_val, df_val_params", "def prepare_data(base_df, n_seconds_min=3):\n # Remove too short samples\n source_df = base_df.loc[base_df['seconds'] > n_seconds_min]\n # Group speakers duplicated by id\n df = source_df.loc[:, ['speaker_id', 'dataset_name']]\n df = df.set_index('speaker_id')\n df = df.loc[~df.index.duplicated(keep='first')]\n dfGrouped = source_df.groupby(['speaker_id']).sum()\n # Count the number of samples for each speaker\n dfCountAudio = source_df.groupby(['speaker_id']).count().filepath\n speakers_duration = dfGrouped.join(df)\n speakers_duration = speakers_duration.join(dfCountAudio)\n speakers_duration = speakers_duration.rename(columns={'filepath': 'n_samples'})\n return source_df, speakers_duration", "def aggregate_counts(counts_files,\n output_file = '/dev/stdout', \n sample_names=None, \n sep=\"\\t\", \n header=0, \n comment=\"#\"):\n sample_pos = -1\n \n if sample_names is not None:\n if len(sample_names)!=len(counts_files):\n logging.error(\"Number of sample names is not the same length as \",\n \"the number of counts files.\")\n raise RuntimeError(\"\")\n\n # read in all counts files\n counts_df = [pd.read_csv(file, sep=sep, header=header, comment=comment) \n for file in counts_files]\n\n # overwrite the sample names if provided\n if sample_names:\n for i, df in enumerate(counts_df):\n #counts_df[i].columns[sample_pos] = sample_names[i]\n new_columns = df.columns.tolist()\n new_columns[sample_pos] = sample_names[i]\n df.columns = new_columns\n else:\n # check sample names are all different\n sample_names_from_files = [df.columns[sample_pos] for df in counts_df]\n\n if (len(set(sample_names_from_files))<len(counts_files)):\n logging.error(\"Sample names in counts files are not unique. 
Fix \",\n \"or provide a list of sample names to use.\")\n raise RunTimeError()\n\n\n # merge the dataframes together\n merged_df = reduce(lambda x, y: pd.merge(x,y), counts_df)\n\n\n # output\n if header is not None:\n out_header = True\n\n with open(output_file, 'w') as handle:\n merged_df.to_csv(handle, sep=sep, header=out_header, index=False)\n\n return 0", "def make_case_metadata_table_query():\n case_aliquot_table_id, study_table_id, case_external_mapping_table_id = get_mapping_table_ids()\n\n file_count_table_name = construct_table_name(API_PARAMS, prefix=BQ_PARAMS['FILE_COUNT_TABLE'])\n file_count_table_id = f\"{BQ_PARAMS['DEV_PROJECT']}.{BQ_PARAMS['META_DATASET']}.{file_count_table_name}\"\n\n return f\"\"\"\n WITH case_project_file_count AS (\n SELECT DISTINCT c.case_id, c.case_submitter_id, c.project_submitter_id, \n s.project_name, s.program_name, s.project_id,\n fc.file_id_count as file_count\n FROM `{case_external_mapping_table_id}` c\n JOIN `{study_table_id}` s\n ON c.project_id = s.project_id\n JOIN `{file_count_table_id}` fc\n ON c.case_id = fc.case_id\n )\n\n SELECT ca.case_id, ca.case_submitter_id, ca.primary_site, ca.disease_type,\n cp.project_name, cp.program_name, cp.project_id, cp.file_count\n FROM `{case_aliquot_table_id}` AS ca\n JOIN case_project_file_count AS cp\n ON cp.case_id = ca.case_id\n \"\"\"", "def generate_table(self, outtablename,\n cols=['A', 'B', 'AB'],\n generateTable=True):\n if generateTable:\n new_indices = ['time (s)', 'mean counts']\n for idx in self.data[cols].describe().index[2:]:\n new_indices.append(idx)\n outTable = self.data[cols].describe()\\\n .set_index(pd.Index(new_indices))\n outTable.to_latex(\n self.tables_dir + outtablename + \".tex\", float_format=\"%d\")\n print(\"Outtable: \", outTable)", "def prepare_batch_samples_for_metadata_export(path_to_samples_info, tsca_id, google_bucket_id):\n # export raw data\n data = pd.read_table(path_to_samples_info)\n # Rename columns to match firecloud requirements\n data = data.rename(columns={'sample_id':'entity:sample_id', 'individual_id':'participant_id'})\n # Locations of BAM files in google bucket\n path_in_bucket_full = \"gs://%s/seq_data/%s\" % (google_bucket_id, tsca_id)\n # Extract bam filename\n data['bam_filename'] = data.apply(lambda row: row['clean_bam_file_capture'].split('/')[-1], axis=1)\n # Create bai filename (change extension on .bam file)\n data['bai_filename'] = data.apply(lambda row: \"%s%s\" %(row['bam_filename'][:-3], 'bai'), axis=1)\n # Change BAM path from xchip to Google cloud\n data['clean_bam_file_capture'] = \\\n data.apply( lambda row: \"%s/%s/%s\" \\\n %(path_in_bucket_full, row['external_id_validation'], row['bam_filename']), axis=1)\n # Add location of .bai file \n data['clean_bai_file_capture'] = \\\n data.apply( lambda row: \"%s/%s/%s\" \\\n %(path_in_bucket_full, row['external_id_validation'], row['bai_filename']), axis=1)\n # Add TSCA ID\n data['tsca_id'] = tsca_id\n\n # Reorganize columns (entity:sample_id at the beginning)\n columns = ['entity:sample_id'] + [col for col in data if col != 'entity:sample_id']\n data = data[columns]\n return data", "def transform(filtered_list):\n out_put = {}\n out_list = []\n # loop to get the required columns, random ordered\n for item in filtered_list:\n for val in item._fields:\n if val in type_dict:\n out_put[val] = type_dict.get(val)(getattr(item, val))\n out_list.append(out_put)\n out_put = {}\n\n # loop to the ordered columns data as per output\n all_rows = []\n for item in out_list:\n tmp_row = []\n for key in 
type_dict.keys():\n out_put[key] = item[key]\n tmp_row.append(item[key])\n all_rows.append(tmp_row)\n\n col_row = [col.replace('_', '-') for col in type_dict.keys()]\n all_rows.insert(0, col_row)\n return all_rows", "def expand_affiliation(df):\n from pandas import Series\n res = df[[\"source_id\", \"author_ids\", \"afid\"]].copy()\n res['afid'] = res[\"afid\"].str.split(';')\n res = (res[\"afid\"].apply(Series)\n .merge(res, right_index=True, left_index=True)\n .drop([\"afid\"], axis=1)\n .melt(id_vars=['source_id', 'author_ids'], value_name=\"afid\")\n .drop(\"variable\", axis=1)\n .dropna())\n res['afid'] = res['afid'].astype(float)\n return res", "def generateAggregatedCsvData(self, context, obj, entities):\n return sum([long(e.prop1.replace('-', ''), 16) for e in entities])", "def _merge(dts):\n df = pd.concat(dts)\n\n ma = df.pivot(index='isomir', columns='sample', values='counts')\n ma_mirna = ma\n ma = ma.fillna(0)\n ma_mirna['mirna'] = [m.split(\":\")[0] for m in ma.index.values]\n ma_mirna = ma_mirna.groupby(['mirna']).sum()\n ma_mirna = ma_mirna.fillna(0)\n return ma, ma_mirna", "def synthesize_from_table(df, geo_df, targets):\n # replace NaNs with None\n targets = targets.where(targets.notnull(), None)\n\n new_df = df\n\n for _, row in targets.iterrows():\n new_df = synthesize_one(\n df=new_df,\n target=row['target_value'],\n alloc_id=row['geo_id_col'],\n geo_df=geo_df,\n geo_col=row['capacity_col'],\n constraint_expr=row['capacity_expr'],\n filters=row['filters'],\n count=row['count'],\n stuff=row['stuff'])\n\n return new_df" ]
[ "0.5996871", "0.5834174", "0.56089836", "0.5596423", "0.5548671", "0.554599", "0.5534935", "0.548897", "0.5448419", "0.5394029", "0.537485", "0.5362557", "0.5339601", "0.53235745", "0.52827585", "0.5272961", "0.5233933", "0.522556", "0.52244353", "0.5220203", "0.5207982", "0.5201744", "0.5201744", "0.5178545", "0.51763254", "0.5175003", "0.5160906", "0.51356107", "0.5124829", "0.51237833", "0.51128864", "0.5099795", "0.50921696", "0.50755244", "0.506032", "0.5059872", "0.5051263", "0.5048963", "0.50474966", "0.5036502", "0.50320566", "0.5028542", "0.50064075", "0.5003736", "0.49984905", "0.498694", "0.49821743", "0.4978145", "0.49739316", "0.49608037", "0.49577188", "0.4951192", "0.4948323", "0.49438456", "0.49356404", "0.49352336", "0.49234664", "0.49216846", "0.4920609", "0.49190423", "0.4911877", "0.4910904", "0.49105003", "0.49079865", "0.48951948", "0.48951668", "0.48911723", "0.48845515", "0.48809332", "0.4872626", "0.48617828", "0.48614907", "0.4860857", "0.48594263", "0.48559833", "0.48516807", "0.48515028", "0.4847017", "0.48436916", "0.48436916", "0.4839328", "0.48309308", "0.48244923", "0.48214206", "0.48182416", "0.48149982", "0.48138413", "0.48123798", "0.48114213", "0.48028022", "0.47925562", "0.4789258", "0.47866267", "0.47784677", "0.47730643", "0.47654584", "0.4759721", "0.47539186", "0.47537482", "0.47493646" ]
0.63312393
0
reformat count or FPKM tables into a dictionary for rapid alignment of attr table with tracking_id
def alignAndReformat_countFPKMattrTables( self,analysis_id_I=None,sna2experimentID_I=None, sna2sns_I=None): #reformat countTable_flat = self.reformat_countTable( analysis_id_I=analysis_id_I, sna2experimentID_I=sna2experimentID_I, sna2sns_I=sna2sns_I,); fpkmTable_flat = self.reformat_fpkmTable( analysis_id_I=analysis_id_I, sna2experimentID_I=sna2experimentID_I, sna2sns_I=sna2sns_I,); attrTable_dict = self.reformat_attrTable(); #align countAndFpkmTable_aligned = []; for row in countTable_flat[:]: row.update(attrTable_dict[row['tracking_id']]); countAndFpkmTable_aligned.append(row); for row in fpkmTable_flat[:]: row.update(attrTable_dict[row['tracking_id']]); countAndFpkmTable_aligned.append(row); return countAndFpkmTable_aligned;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reformat_attrTable(\n self):\n #format into a dictionary of rows for quick aligning with the tracking_id\n if self.attrTable: attrTable = self.attrTable[:];\n else: attrTable = [];\n\n attrTable_dict = {};\n for row in attrTable:\n attrTable_dict[row['tracking_id']] = row;\n return attrTable_dict;", "def reformat_countOrFPKMTable(\n self,\n countOrFPKMTable_I=None,\n analysis_id_I=None,\n sna2experimentID_I=None,\n sna2sns_I=None,\n count_or_FPKM = 'count'):\n #format into a dictionary of rows for quick aligning with the tracking_id\n countOrFPKMTable_flat = [];\n for row in countOrFPKMTable_I:\n for k,v in row.items():\n if k=='tracking_id':continue;\n tmp = {};\n tmp['analysis_id'] = analysis_id_I;\n tmp['tracking_id'] = row['tracking_id'];\n\n sample_name_lst = k.split('_');\n sample_name_base = '_'.join(sample_name_lst[:-1]);\n sample_name_rep = eval(sample_name_lst[-1]);\n if sna2experimentID_I: \n experiment_id = sna2experimentID_I[sample_name_base];\n else:\n experiment_id=None;\n tmp['experiment_id'] = experiment_id;\n if sna2sns_I: \n sample_name = sna2sns_I[sample_name_base][sample_name_rep];\n else:\n sample_name=k;\n tmp['sample_name'] = sample_name;\n\n tmp['value'] = v;\n tmp['value_units'] = count_or_FPKM;\n tmp['used_'] = True;\n tmp['comment_'] = None;\n countOrFPKMTable_flat.append(tmp);\n return countOrFPKMTable_flat;", "def format_countOrFPKMTable(self,fpkmTracking_I):\n for fpkmTracking in fpkmTracking_I:\n for k,v in fpkmTracking.items():\n if k=='tracking_id' and type(fpkmTracking['tracking_id'])==type('string'):\n pass;\n elif k!='tracking_id' and type(fpkmTracking[k])==type('string'):\n fpkmTracking[k] = eval(v);\n return fpkmTracking_I;", "def tables() -> dict[str, str]:\n return {\n \"land_use\": \"zone_id\",\n \"tours\": \"tour_id\",\n \"trips\": \"trip_id\",\n \"persons\": \"person_id\",\n \"households\": \"household_id\",\n }", "def reformat_countTable(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n if self.countTable: countTable = self.countTable[:];\n else: countTable = [];\n\n countTable_flat = self.reformat_countOrFPKMTable(\n countOrFPKMTable_I=countTable,\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,\n count_or_FPKM = 'count');\n return countTable_flat;", "def normalize_counts_to_tpm(counts_dir, gff_dir, out_dir, feat='CDS', id_sym='gene_id='):\n count_files = [os.path.join(counts_dir, f) for f in os.listdir(counts_dir)]\n all_tpms = {}\n for cf in count_files:\n if \"_counts\" not in cf:\n continue\n tpm = normalize_counts_to_tpm_one_file(cf, gff_dir, feat, id_sym)\n #out_file = \"{}_tpm.csv\".format(os.path.basename(cf))\n #out_path = os.path.join(out_dir, out_file)\n #with open(out_path, \"w\") as fo:\n # for gene, t in tpm.items():\n # fo.write(\"{},{}\\n\".format(gene, t))\n prefix = os.path.basename(cf).split(\"_trimmed\")[0] # this would be specific to my naming convention\n all_tpms[prefix] = tpm\n return all_tpms", "def avail_table_to_dict(avail_data):\n avail_target = avail_data[\"TARGETID\"]\n avail_loc = avail_data[\"LOCATION\"]\n avail = dict()\n for lid, tgid in zip(avail_loc, avail_target):\n if lid in avail:\n avail[lid].append(tgid)\n else:\n avail[lid] = list([tgid])\n avail = {f: np.array(av) for f, av in avail.items()}\n return avail", "def accumulate_entries_as_tables(entries):\n name_table = {}\n num_table = {}\n for number, name in entries:\n name_table[name] = number\n num_table[number] = name\n\n return name_table, num_table", "def 
make_category_tables(category_table):\n category2label = {}\n label2category = {}\n for item in category_table.itertuples():\n category_id = item[0]\n label_id = item[4]\n category2label[category_id] = label_id\n label2category[label_id] = category_id\n return category2label, label2category", "def create_count_map(self) -> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res", "def tallying_genes():\n #Creating a tallying Mechanism of genes with multiple sequences in file and\n # an output file for future alignment of sequences \n blast_hit_results = open('blast_hits_report.txt', 'r')\n gene_dict={}\n\n for line in blast_hit_results:\n data = line.split(\"\\t\")\n \n if line.startswith('SeqID'):\n continue\n else:\n #Test to see if organism in dictionary\n verdict = gene_dict.get(data[6])\n \n if str(verdict) == \"None\":\n #creating new entry\n key = data[6]\n seq_info=str(data[0])+\"|\"+str(data[1])\n counter = 1\n #Value[Counts, Trimmed_Length, Blast Length, Blast_Score, Blast_Percent_Identity]\n value=[data[5], counter, [seq_info]]\n gene_dict.update({key:value})\n else:\n #Fills dictionary based on organism name\n seq_info=str(data[0])+\"|\"+str(data[1])\n gene_dict[data[6]][1]+=1\n gene_dict[data[6]][2].append(seq_info)\n blast_hit_results.close()\n return(gene_dict)", "def basic_table_details():\n tbl: pa.table = pa.Table.from_pylist([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ])\n\n results = {\n 'column_names': tbl.column_names,\n 'columns > map > combine_chunks > to_pylist': [col.combine_chunks().to_pylist() for col in tbl.columns],\n 'nbytes': tbl.nbytes,\n 'num_columns': tbl.num_columns,\n 'num_rows': tbl.num_rows,\n 'schema': tbl.schema,\n 'shape': tbl.shape,\n }\n\n print(results)", "def create_dicts_w_info(df,\n table_visit_diff_string,\n bad_records_string='num_bad_records'):\n\n hpos = df['src_hpo_id'].unique().tolist()\n\n site_dictionaries = {}\n\n for hpo in hpos:\n sample_df = df.loc[df['src_hpo_id'] == hpo]\n\n sample_df.loc[\"Total\"] = sample_df.sum(numeric_only=True)\n\n hpo_dict = sample_df.loc[\"Total\"].to_dict()\n\n site_dictionaries[hpo] = hpo_dict\n\n tot = 0\n\n num_bad_records = {}\n\n for hpo, info in site_dictionaries.items():\n num_bad_records[hpo] = info[bad_records_string]\n\n table_visit_diff_dict = {}\n tot_rec, tot_diff = 0, 0\n\n for hpo, info in site_dictionaries.items():\n bad_records = info[bad_records_string]\n difference = info[table_visit_diff_string]\n\n tot_rec += bad_records\n tot_diff += difference\n\n avg = round(difference / bad_records, 2)\n\n table_visit_diff_dict[hpo] = avg\n\n table_visit_diff_dict['Total'] = round(tot_diff / tot_rec, 2)\n\n return num_bad_records, table_visit_diff_dict", "def reformat_fpkmTable(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n if self.fpkmTable: fpkmTable = self.fpkmTable[:];\n else: fpkmTable = [];\n\n fpkmTable_flat = self.reformat_countOrFPKMTable(\n countOrFPKMTable_I=fpkmTable,\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,\n count_or_FPKM = 'fpkm');\n return fpkmTable_flat;", "def 
table_fields() -> Dict[str, TableFieldDetails]:\n return {\n \"REPEATS\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=0,\n bit_high=15,\n description=\"Number of times the line will repeat\",\n labels=None,\n ),\n \"TRIGGER\": TableFieldDetails(\n subtype=\"enum\",\n bit_low=16,\n bit_high=19,\n description=\"The trigger condition to start the phases\",\n labels=[\n \"Immediate\",\n \"BITA=0\",\n \"BITA=1\",\n \"BITB=0\",\n \"BITB=1\",\n \"BITC=0\",\n \"BITC=1\",\n \"POSA>=POSITION\",\n \"POSA<=POSITION\",\n \"POSB>=POSITION\",\n \"POSB<=POSITION\",\n \"POSC>=POSITION\",\n \"POSC<=POSITION\",\n ],\n ),\n \"POSITION\": TableFieldDetails(\n subtype=\"int\",\n bit_low=32,\n bit_high=63,\n description=\"The position that can be used in trigger condition\",\n labels=None,\n ),\n \"TIME1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=64,\n bit_high=95,\n description=\"The time the optional phase 1 should take\",\n labels=None,\n ),\n \"OUTA1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=20,\n bit_high=20,\n description=\"Output A value during phase 1\",\n labels=None,\n ),\n \"OUTB1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=21,\n bit_high=21,\n description=\"Output B value during phase 1\",\n labels=None,\n ),\n \"OUTC1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=22,\n bit_high=22,\n description=\"Output C value during phase 1\",\n labels=None,\n ),\n \"OUTD1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=23,\n bit_high=23,\n description=\"Output D value during phase 1\",\n labels=None,\n ),\n \"OUTE1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=24,\n bit_high=24,\n description=\"Output E value during phase 1\",\n labels=None,\n ),\n \"OUTF1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=25,\n bit_high=25,\n description=\"Output F value during phase 1\",\n labels=None,\n ),\n \"TIME2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=96,\n bit_high=127,\n description=\"The time the mandatory phase 2 should take\",\n labels=None,\n ),\n \"OUTA2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=26,\n bit_high=26,\n description=\"Output A value during phase 2\",\n labels=None,\n ),\n \"OUTB2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=27,\n bit_high=27,\n description=\"Output B value during phase 2\",\n labels=None,\n ),\n \"OUTC2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=28,\n bit_high=28,\n description=\"Output C value during phase 2\",\n labels=None,\n ),\n \"OUTD2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=29,\n bit_high=29,\n description=\"Output D value during phase 2\",\n labels=None,\n ),\n \"OUTE2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=30,\n bit_high=30,\n description=\"Output E value during phase 2\",\n labels=None,\n ),\n \"OUTF2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=31,\n bit_high=31,\n description=\"Output F value during phase 2\",\n labels=None,\n ),\n }", "def infodump(self):\n idtable = dict.fromkeys(KEYS_IDSTABLE)\n rttable = dict.fromkeys(KEYS_RTTABLE)\n #Construct Common table, IMDb table, Review table\n commontable,imdbtable,reviewtable = self.dump_imdb()\n #Contruct IDTable\n idtable[\"imdb_id\"] = imdbtable[\"imdb_id\"]\n if commontable[\"kind\"] == \"movie\":\n #Contruct RT table\n rttable = self.dump_rt()\n idtable[\"rt_id\"] = rttable[\"rt_id\"]\n return idtable,commontable,imdbtable,rttable,reviewtable", "def get_tally_map (self):\n if self._tally_map is None:\n self._tally_map = {}\n # for field in self.header[-3:]:\n for column in self.get_award_id_columns():\n 
self._tally_map[column] = self.tally_awards_in_column(column)\n return self._tally_map", "def get_indices_convert_dict(fn):\n pdb_inp = pdb.input(file_name=fn)\n pdb_hierarchy = pdb_inp.construct_hierarchy()\n \n newids = OrderedDict((atom.id_str(), idx) for (idx, atom) in enumerate(pdb_hierarchy.atoms()))\n oldids= OrderedDict((atom.id_str(), idx) for (idx, atom) in enumerate(pdb_inp.atoms()))\n \n return {'p2a': np.array([newids[atom.id_str()] for atom in pdb_inp.atoms()]),\n 'a2p': np.array([oldids[atom.id_str()] for atom in pdb_hierarchy.atoms()])}", "def pre_build_idf_table(self):\r\n doc_per_word_table = dict() # in how many documents does a word occur\r\n \r\n for doc in self.documents:\r\n # converting list to set will delete any duplicate words\r\n doc = self.preprocess_document(doc)\r\n doc_words = set(self.word_tokenize_preprocessed(doc))\r\n\r\n for word in doc_words:\r\n if word in doc_per_word_table:\r\n doc_per_word_table[word] += 1\r\n else:\r\n doc_per_word_table[word] = 1\r\n\r\n total_documents = len(self.documents)\r\n idf_table = dict()\r\n\r\n for word in doc_per_word_table:\r\n idf_table[word] = math.log2(total_documents / float(doc_per_word_table[word]))\r\n\r\n return idf_table", "def gfa_table_to_dict(gfa_data):\n gfa_target = gfa_data[\"TARGETID\"]\n gfa_loc = gfa_data[\"GFA_LOC\"]\n gfa_gmag = gfa_data[\"GAIA_PHOT_G_MEAN_MAG\"]\n gfa = dict()\n for lid, tgid, mag in zip(gfa_loc, gfa_target,gfa_gmag):\n print(zip(gfa_loc, gfa_target,gfa_gmag))\n if lid in gfa:\n gfa[lid].append(mag)\n else:\n gfa[lid] = list([mag])\n gfa = {f: np.array(av) for f, av in gfa.items()}\n return gfa", "def _convert_table_to_dict(self, data_table):\n column_names = ['star_name', 'distance', 'brightness', 'luminosity']\n stars = {}\n for line in data_table:\n stars[line[0]] = {column_names[i] : line[i] for i in range(1, len(column_names))}\n return stars", "def group(df):\r\n t = len(df.columns.values)\r\n ans = {}\r\n names = df.columns.values\r\n for index,row in df.iterrows(): \r\n for j in range(t-1): \r\n index = str(j) \r\n llaveCol = str(row[j])\r\n llaveClase = str(row[t-1]) \r\n if index not in ans:\r\n ans[index] = {}\r\n if llaveCol not in ans[index]:\r\n ans[index][llaveCol] = {} \r\n if llaveClase not in ans[index][llaveCol]:\r\n ans[index][llaveCol][llaveClase] = 0 \r\n ans[index][llaveCol][llaveClase]+=1 \r\n return ans", "def get_columns_dict(table, replace):\n # 0 is name, 1 is id\n if type(table.index) == pd.MultiIndex:\n colcount = 1 + len(table.index[0])\n else:\n colcount = 2\n cols = {}\n for c in table.columns:\n c_repres = \",\".join(c)\n if \"Filtergroups\" not in c:\n cols[colcount] = replace_in_str(str(c_repres), replace)\n colcount = colcount + 1\n return cols", "def aggregate_data(mts, feature, target):\r\n set_dict = dict()\r\n set_dict['mt'] = mts\r\n set_dict['feature'] = feature\r\n set_dict['target'] = target\r\n \r\n return set_dict", "def build_rule_count_dict(counts_iterator):\n rule_count_dict = {}\n for l in counts_iterator:\n if l[1] != 'NONTERMINAL':\n x = l[2]\n y = l[1] == 'UNARYRULE' and l[3] or l[3] + ' ' + l[4]\n # if l[1] == 'UNARYRULE':\n # y = l[3]\n # else: # l[1] == 'BINARYRULE'\n # y = l[3] + ' ' + l[4]\n if x not in rule_count_dict:\n rule_count_dict[x] = {}\n rule_count_dict[x][y] = int(l[0])\n return rule_count_dict", "def dictagnum(kind, fname):\n\n with open(fname, 'r') as g:\n g.next()\n g.next()\n m = g.next()\n startdict = agline(m)\n genold = startdict['gen']\n\n f = open(fname)\n f.next()\n f.next()\n d = {}\n y = '1'\n nb = 
[]\n for l in f:\n adict = agline(l)\n ks = kind + 's'\n gen = adict['gen']\n well = adict['well']\n\n if adict['gen'] not in d:\n d[gen] = []\n \n if gen != genold:\n d[genold].append(sum(nb))\n nb = []\n else: \n if adict['well'] != y:\n d[gen].append(sum(nb))\n nb = []\n \n if kind == 'charge':\n if adict[ks] == 'x':\n nb.append(0)\n elif int(adict[ks]) >= 0 and (adict['charget'] == 'c' or \n adict['charget'] == 'o'):\n nb.append(1)\n elif adict[ks] == '-':\n pass\n #print('nb', nb)\n\n if kind == 'escd' or kind == 'escm':\n if adict[ks] == '':\n nb.append(0)\n elif int(adict[ks]) >= 0:\n nb.append(1)\n elif adict[ks] == '-':\n pass\n\n y = adict['well']\n genold = adict['gen']\n \n d[gen].append(sum(nb))\n \n return(d)", "def decode_fusionTable_schema(tables: list) -> dict:\n def _map_col(col: dict) -> dict:\n col_schema = {k: col.get(k, '') for k in ('name', 'columnId', 'description')}\n if col['name'] != 'Comment':\n col_schema['mode'] = 'REQUIRED'\n # We can actually use int/float now!\n col_type: str = col['type']\n if col_type == 'NUMBER':\n col_type = 'INT64' if col['formatPattern'] == 'NUMBER_INTEGER' else 'FLOAT64'\n col_schema['type'] = col_type\n return col_schema\n\n def _map_table(table: dict) -> dict:\n table_schema = {k: table.get(k, '') for k in ('name', 'tableId', 'description')}\n table_schema['columns'] = list(map(_map_col, table['columns']))\n return table_schema\n\n return dict((s.get('tableId'), s) for s in map(_map_table, tables))", "def format_bgc_metadata(df,float_id):\n mdf = df[bgc_metadata_columns]\n bgc_metadata_dict = {}\n for col in list(mdf):\n bgc_metadata_dict[col] = list(pd.Series(mdf[col].unique()).astype(str).str.strip())\n bgc_metadata_dict[col] = list(pd.Series(mdf[col].unique()).astype(str).str.strip().replace(\"'\",'\"'))\n bgc_metadata_dict = json.dumps(bgc_metadata_dict) \n bgc_metadata_df = pd.DataFrame({\"float_id\": [float_id], \"Metadata_Dict\": [bgc_metadata_dict]})\n return bgc_metadata_df", "def get_mapping_table_ids():\n dev_meta_dataset = f\"{BQ_PARAMS['DEV_PROJECT']}.{BQ_PARAMS['META_DATASET']}\"\n prod_meta_dataset = f\"{BQ_PARAMS['PROD_PROJECT']}.{BQ_PARAMS['PUBLIC_META_DATASET']}\"\n\n file_metadata_table_name = f\"{BQ_PARAMS['FILE_METADATA_TABLE']}_{API_PARAMS['RELEASE']}\"\n file_metadata_table_id = f\"{prod_meta_dataset}.{file_metadata_table_name}\"\n\n case_metadata_table_name = f\"{BQ_PARAMS['CASE_METADATA_TABLE']}_{API_PARAMS['RELEASE']}\"\n case_metadata_table_id = f\"{prod_meta_dataset}.{case_metadata_table_name}\"\n\n file_assoc_table_name = f\"{BQ_PARAMS['FILE_ASSOC_MAPPING_TABLE']}_{API_PARAMS['RELEASE']}\"\n file_assoc_table_id = f\"{prod_meta_dataset}.{file_assoc_table_name}\"\n\n aliquot_table_name = f\"{BQ_PARAMS['ALIQUOT_TO_CASE_TABLE']}_{API_PARAMS['RELEASE']}\"\n aliquot_table_id = f\"{prod_meta_dataset}.{aliquot_table_name}\"\n\n # todo switch to published table\n study_table_name = construct_table_name(API_PARAMS, prefix=get_prefix(API_PARAMS, 'allPrograms'))\n study_table_id = f\"{dev_meta_dataset}.{study_table_name}\"\n\n return file_metadata_table_id, case_metadata_table_id, file_assoc_table_id, aliquot_table_id, study_table_id", "def _create_tf_table(self, words) -> dict:\r\n\r\n freqTable = dict()\r\n tfTable = dict()\r\n\r\n totalWords = len(words)\r\n for word in words:\r\n if word in freqTable:\r\n freqTable[word] += 1\r\n else:\r\n freqTable[word] = 1\r\n \r\n uniqueWords = set(words)\r\n for word in uniqueWords:\r\n tfTable[word] = freqTable[word] / float(totalWords)\r\n\r\n return tfTable", "def 
get_tabledata_for(doc):\n plomino_table = doc.getForm().id\n data = dict(ADDITIONAL_VALUES)\n map = FIELDS_MAP[plomino_table]\n for key in doc.getItems():\n if key not in map:\n continue\n value = normalize_value(doc.getItem(key, None))\n data[map[key]] = value\n post_method = plomino_table + '_postprocessing'\n if post_method in globals():\n globals()[post_method](doc, data)\n iride_table = map['__tablename']\n return {\n iride_table: (data,)\n }", "def review_counts(stat_info_dict):\n review_counts = {}\n for release, stat_dict in stat_info_dict.items():\n review_counts_per_release = {}\n for key, stat in stat_dict.items():\n # review count\n review_counts_per_release[key] = stat['metric']\n review_counts[release] = review_counts_per_release\n return review_counts", "def reprocess_dict (dict1):\n out = {};\n for kk,value in dict1.iteritems():\n # parse keys\n (lo0,dur,decdec,freqmhz,nch),weight = kk[0].split(\"_\"),kk[1]\n if weight != \"natural\":\n weight += \":\" + kk[3];\n dec = -int(decdec.split(\"-\")[1]);\n freq = int(freqmhz[:-3])\n # parse layout\n lo = lo0;\n if lo[-2] in \"abcd\":\n lores = \"0.\"+lo[-1];\n lofreq = dict(a=650,b=800,c=1000,d=1400)[lo[-2]];\n lo = lo[:-2];\n else:\n lores = 0;\n lofreq = 0;\n lo = lo[4:];\n l00 = lo0[4:]\n wbins.add(weight);\n # make new entry\n out[lo0,lores,lofreq,freq,dec,weight] = [value,kk];\n return out;", "def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))", "def update_data(self):\n for sai_id_key in self.if_id_map:\n namespace, sai_id = mibs.split_sai_id_key(sai_id_key)\n if_idx = mibs.get_index_from_str(self.if_id_map[sai_id_key])\n counter_table = self.namespace_db_map[namespace].get_all(mibs.COUNTERS_DB, \\\n mibs.counter_table(sai_id))\n if counter_table is None:\n counter_table = {}\n self.if_counters[if_idx] = counter_table\n\n\n self.lag_name_if_name_map, \\\n self.if_name_lag_name_map, \\\n self.oid_lag_name_map, _, _ = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_lag_tables, self.db_conn)\n\n self.if_range = sorted(list(self.oid_name_map.keys()) + list(self.oid_lag_name_map.keys()))\n self.if_range = [(i,) for i in self.if_range]", "def to_table(self):\n table = Table()\n table['THETA_LO'] = Quantity([self.counts.offset[:-1]], unit=self.counts.offset.unit)\n table['THETA_HI'] = Quantity([self.counts.offset[1:]], unit=self.counts.offset.unit)\n table['ENERG_LO'] = Quantity([self.counts.energy[:-1]], unit=self.counts.energy.unit)\n table['ENERG_HI'] = Quantity([self.counts.energy[1:]], unit=self.counts.energy.unit)\n table['counts'] = self.counts.to_table()['data']\n if self.counts.data_err is not None:\n table['counts_err'] = self.counts.to_table()['data_err']\n table['livetime'] = self.livetime.to_table()['data']\n table['bkg'] = self.bg_rate.to_table()['data']\n if self.bg_rate.data_err is not None:\n table['bkg_err'] = self.bg_rate.to_table()['data_err']\n table.meta['HDUNAME'] = \"bkg_2d\"\n return table", "def calculate_TF_IDF():\n conn = util.getDBConnection()\n sql = \"select word from clean_keywords\"\n print sql\n rows = util.executeSQL(conn, sql)\n word_tf = {}\n word_df = {}\n for row in rows:\n word = row[0]\n sql1 = \"select doc_id from keywords where 
name='\" + word + \"'\"\n print sql1\n res = util.executeSQL(conn, sql1)\n for row1 in res:\n pkg_id = row1[0]\n key = word + ':' + str(pkg_id)\n if key in word_tf:\n tf_count = word_tf[key]\n word_tf[key] = tf_count + 1\n else:\n word_tf[key] = 1\n if word in word_df:\n df_count = word_df[word]\n word_df[word] = df_count + 1\n else:\n word_df[word] = 1\n\n for word, df in word_df.iteritems():\n sql = 'update clean_keywords set df=' + str(df) + \" where word='\" + word + \"'\"\n print sql\n util.executeSQL(conn, sql)\n\n for word_pkgid, tf in word_tf.iteritems():\n word, pkg_id = word_pkgid.split(\":\")\n sql = 'update keywords set tf=' + str(tf) + \" where name='\" + word + \"' and doc_id=\" + str(pkg_id)\n print sql\n util.executeSQL(conn, sql)", "def dict2():\n print(record1)\n record2 = {}\n for k, v in record1.items():\n count = 0\n for letter in v:\n if letter == 't' or letter == 'T':\n count += 1\n record2[k] = count\n print(record2)", "def prepare_for_table(data, machine_id):\n\n defint = lambda x: 0 if x == '' else int(x)\n\n keep_and_convert = {\n 'MESSAGE': str,\n 'PRIORITY': defint,\n '__REALTIME_TIMESTAMP': defint,\n '_PID': defint,\n '_UID': defint,\n '_SYSTEMD_UNIT': str,\n 'SYSLOG_IDENTIFIER': str,\n '_COMM': str,\n }\n result = dict((key, converter(data.get(key, ''))) for key, converter in keep_and_convert.items())\n result['MACHINE_ID'] = machine_id\n return data['__CURSOR'], result", "def append_counting(dict):\n row_c = []\n # for nuc in NUC: #Scans all the elements and adds it to the table.\n # row_c.append(dict[nuc])\n for mot in MOT:\n row_c.append(dict[mot])\n for nuc_nr in NUC_NR :\n row_c.append(dict[nuc_nr + \"_NR\"])\n # #row.extend([dict[\"AA_NR\"], dict[\"TT_NR\"], dict[\"CC_NR\"], dict[\"GG_NR\"]])\n return row_c", "def get_patients_dict(table):\n\tf = open(table)\n\tpatients = f.readline().strip().split(\"\\t\")[1:]\n\t\t \n\tpatients_dict = {}\n\tfor i in patients:\n\t\tpatients_dict[i.replace('\"', '')] = {}\n\t\t \n\tfor i in f:\n\t\tl = i.strip().split(\"\\t\")\n\t\tgene = l[0]\n\n\t\tfor j in range(len(l[1:])):\n\t\t\tpatients_dict[patients[j]][gene] = int(l[1:][j])\n\treturn patients_dict", "def build_article_map(f='./wikispeedia_paths-and-graph/articles.tsv'):\n out_dict = {}\n count = 0\n with open(f, 'r') as r:\n for _ in xrange(12):\n next(r)\n for line in r:\n out_dict[line.strip('\\n')] = count\n count += 1\n return out_dict", "def files2dictionary(filename, countryID, supp_dict):\r\n\r\n fh = open(filename)\r\n header = next(fh)\r\n\r\n data_dict = {}\r\n data_dict[countryID] = {}\r\n\r\n numlist = range(1, 36)\r\n agelist = []\r\n for line in fh:\r\n linelist = line.strip().split(\",\")\r\n age = linelist[4]\r\n agelist.append(age)\r\n for icdrep in numlist:\r\n if str(age) not in data_dict[countryID]:\r\n data_dict[countryID][str(age)] = {}\r\n #if str(icdrep) not in data_dict[countryID][str(age)]:\r\n data_dict[countryID][str(age)][str(icdrep)] = float(linelist[icdrep+8]) # look into changing this to one million\r\n else:\r\n data_dict[countryID][str(age)][str(icdrep)] = float(linelist[icdrep+8]) # look into changing this to one million\r\n fh.close()\r\n supp_dict.update(support_counter(header.split(\",\"), agelist, supp_dict))\r\n return data_dict, supp_dict", "def compactify(self):\n logger.debug(\"rebuilding dictionary, shrinking gaps\")\n\n # build mapping from old id -> new id\n idmap = dict(zip(sorted(itervalues(self.token2id)), range(len(self.token2id))))\n\n # reassign mappings to new ids\n self.token2id = {token: idmap[tokenid] for 
token, tokenid in iteritems(self.token2id)}\n self.id2token = {}\n self.dfs = {idmap[tokenid]: freq for tokenid, freq in iteritems(self.dfs)}", "def _prepare_stats_table(self, pinfos):\n\n stats_tbl = OrderedDict()\n stats_tbl[\"Title\"] = OrderedDict()\n for res in self.rsts:\n stats_tbl[res.reportid] = OrderedDict()\n\n for pinfo in pinfos:\n for colname in (pinfo[\"colname\"], pinfo[\"xcolname\"]):\n if colname in stats_tbl[\"Title\"]:\n continue\n\n # Each column name is represented by a row in the statistics table. Fill the \"Title\"\n # column.\n title_dict = stats_tbl[\"Title\"][colname] = OrderedDict()\n defs = self._refdefs.info[colname]\n\n if defs.get(\"unit\") == \"nanosecond\":\n # Convert nanoseconds to microseconds.\n unit = \"us\"\n else:\n unit = defs.get(\"short_unit\", \"\")\n\n title_dict[\"colname\"] = colname\n if unit:\n title_dict[\"colname\"] += f\", {unit}\"\n title_dict[\"coldescr\"] = defs[\"descr\"]\n\n title_dict[\"funcs\"] = OrderedDict()\n for funcname in self._stats_funcs:\n if funcname in self.rsts[0].cstats[colname]:\n title_dict[\"funcs\"][funcname] = RORawResult.get_stat_func_descr(funcname)\n\n # Now fill the values for each result.\n for res in self.rsts:\n res_dict = stats_tbl[res.reportid][colname] = OrderedDict()\n res_dict[\"funcs\"] = OrderedDict()\n\n for funcname in title_dict[\"funcs\"]:\n val = res.cstats[colname][funcname]\n fmt = \"{}\"\n if defs.get(\"unit\") == \"nanosecond\" and \"index\" not in funcname:\n val /= 1000\n fmt = \"{:.2f}\"\n if defs[\"type\"] == \"float\":\n fmt = \"{:.2f}\"\n\n fdict = res_dict[\"funcs\"][funcname] = OrderedDict()\n fdict[\"val\"] = fmt.format(val)\n fdict[\"raw_val\"] = val\n\n if self._refres.reportid == res.reportid:\n fdict[\"hovertext\"] = \"This is the reference result, other results \" \\\n \"are compared to this one.\"\n continue\n\n ref_fdict = stats_tbl[self._refres.reportid][colname][\"funcs\"][funcname]\n change = val - ref_fdict[\"raw_val\"]\n if ref_fdict[\"raw_val\"]:\n percent = (change / ref_fdict[\"raw_val\"]) * 100\n else:\n percent = change\n change = fmt.format(change) + unit\n percent = \"{:.1f}%\".format(percent)\n fdict[\"hovertext\"] = f\"Change: {change} ({percent})\"\n\n return stats_tbl", "def _collect_counts(self, instance_list):\n \"\"\" Based on each instance, I augment empirical counts for every word and its BIO label in feature_count_table and for every transition from previous label to current label in transition_count_table.\n All \"rare words\" (those words that appear less than 3 times) are replaced by <UNK>.\n I also add label|START counts.\n \"\"\"\n # Build feature_count_table of V x labels and transition_count_table of labels x labels\n for instance in instance_list: # Set of <(w, pos), l>\n index = 0\n for t in instance.data: # Tuple of (w, pos)\n index = instance.data.index(t)\n # print t[0] # word\n # print instance.label[index] # label\n if t in self.V:\n self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])] +=1\n else:\n self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])] +=1\n if index > 0:\n self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] += 1\n else:\n self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])] += 1", "def normalize_keys(df: pd.DataFrame) -> None:\n renames = {'tripUpdate_trip_tripId': 'trip_id', 'tripUpdate_trip_startDate': 'start_date',\n 'tripUpdate_trip_directionId': 
'direction_id', 'tripUpdate_trip_routeId': 'route_id',\n 'tripUpdate_trip_scheduleRelationship': 'schedule_relationship',\n 'tripUpdate_trip_startTime': 'start_time',\n 'tripUpdate_timestamp': 'timestamp', 'tripUpdate_vehicle_id': 'vehicle_id',\n 'stopSequence': 'stop_sequence', 'stopId': 'stop_id',\n 'scheduleRelationship': 'schedule_relationship2',\n 'vehicle_trip_tripId': 'trip_id', 'vehicle_trip_scheduleRelationship': 'schedule_relationship',\n 'vehicle_timestamp': 'timestamp', 'vehicle_vehicle_id': 'vehicle_id',\n 'vehicle_trip_startTime': 'start_time', 'vehicle_trip_startDate': 'start_date',\n 'vehicle_trip_routeId': 'route_id', 'vehicle_trip_directionId': 'direction_id',\n 'tripUpdate_stopTimeUpdate_stopSequence': 'stop_sequence',\n 'tripUpdate_stopTimeUpdate_stopId': 'stop_id',\n 'tripUpdate_stopTimeUpdate_arrival_delay': 'arrival_delay',\n 'tripUpdate_stopTimeUpdate_arrival_time': 'arrival_time',\n 'tripUpdate_stopTimeUpdate_departure_delay': 'departure_delay',\n 'tripUpdate_stopTimeUpdate_departure_time': 'departure_time',\n 'tripUpdate_stopTimeUpdate_arrival_uncertainty': 'arrival_uncertainty',\n 'tripUpdate_stopTimeUpdate_departure_uncertainty': 'departure_uncertainty',\n 'alert_activePeriod_start': 'period_start', 'alert_activePeriod_end': 'period_end',\n 'alert_informedEntity_routeId': 'route_id', 'alert_informedEntity_stopId': 'stop_id',\n 'alert_informedEntity_trip_tripId': 'trip_id',\n 'alert_informedEntity_trip_scheduleRelationship': 'schedule_relationship',\n 'alert_headerText_translation_text': 'header_text',\n 'alert_descriptionText_translation_text': 'description_text',\n }\n df.rename(columns=renames, inplace=True)", "def aggregate_buff_lines(res_lines, buff_lines, dispel_lines):\n\n res_data = {}\n buff_data = {}\n dispel_data = {}\n\n for lines, data in zip(\n (res_lines, buff_lines, dispel_lines), (res_data, buff_data, dispel_data)\n ):\n for spell_id, spell_name, source, target in lines:\n if spell_id not in data:\n data[spell_id] = {}\n\n spell_dict = data[spell_id]\n\n if source not in spell_dict:\n spell_dict[source] = 0\n\n spell_dict[source] += 1\n\n return res_data, buff_data, dispel_data", "def _finalize(self, sampled_data):\n final_data = dict()\n for table_name, table_rows in sampled_data.items():\n parents = self.metadata.get_parents(table_name)\n if parents:\n for parent_name in parents:\n foreign_key = self.metadata.get_foreign_key(parent_name, table_name)\n if foreign_key not in table_rows:\n parent_ids = self._find_parent_ids(table_name, parent_name, sampled_data)\n table_rows[foreign_key] = parent_ids\n\n reversed_data = self.metadata.reverse_transform(table_name, table_rows)\n\n fields = self.metadata.get_fields(table_name)\n\n final_data[table_name] = reversed_data[list(fields.keys())]\n\n return final_data", "def _transform_idoc(df):\n global _SIMPLECOUNT_COLUMNS\n\n try:\n df['comcnty'] = ((df['comcnty'] + 1) / 2).astype(int)\n df.columns = ['year', 'fk_simplecount_county'] + df.columns.tolist()[2:]\n\n indicator_list = [1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1620, 1621]\n \n c_nc = df['admtypo3'] == 1\n c_tv = df['admtypo3'] == 2\n c_pers = df['offtype2'] == 1 # df['offtype'] == 1\n c_prop = df['offtype2'] == 2 # df['offtype'] == 2\n c_sex = df['offtype2'] == 4 # df['offtype'] == 4\n c_drug = df['offtype2'].isin([3.1, 3.2, 3.3, 3.4, 3.5, 3.6]) # df['offtype'] == 3\n c_other = df['offtype2'].isin([0, 3, 5, 7]) # df['offtype'] == 7\n c_viol = df['offtype'] == 1\n c_male = df['sex'] == 'M'\n c_female = ~c_male\n\n c_first2 = [c_nc, 
c_tv]\n c_others = [c_pers, c_prop, c_sex, c_drug, c_other, c_viol, c_male, c_female]\n \n def helper(c, indicator_id, first2):\n df['fk_simplecount_indicator'] = indicator_id\n g = ['fk_simplecount_indicator', 'year', 'fk_simplecount_county']\n if first2:\n return df[c].groupby(g).size().reset_index(name='value')\n else:\n return df[c_nc & c].groupby(g).size().reset_index(name='value')\n\n out = pd.DataFrame()\n for i in range(2):\n out = out.append(helper(c_first2[i], indicator_list[i], first2=True))\n \n for i in range(len(c_others)):\n out = out.append(helper(c_others[i], indicator_list[i+2], first2=False))\n\n out = out.loc[out['fk_simplecount_county'].isin(range(1,102+1))]\n return out[_SIMPLECOUNT_COLUMNS]\n except:\n raise", "def _get_rank_values(self):\n \n info_gains = {}\n \n #caluclate info gain\n for col in self.cat_cols:\n info_gains[col] = self._get_info_gain(col)\n \n return info_gains", "def _getSourceMetrics(self, idKeyTuple, idValList, sourceTableList):\n if self._idKeyTuple is None:\n self._idKeyTuple = tuple(idKeyTuple)\n self._idKeyDTypeList = _getDTypeList(keyTuple = self._idKeyTuple,\n valTuple = idValList[0])\n else:\n if self._idKeyTuple != tuple(idKeyTuple):\n raise RuntimeError(\"idKeyTuple = %s != %s = first idKeyTuple; must be the same each time\" % \\\n (idKeyTuple, self._idKeyTuple))\n\n dataDict = {}\n for idTuple, sourceTable in itertools.izip(idValList, sourceTableList):\n if len(sourceTable) == 0:\n continue\n \n idList = sourceTable.get(\"id\")\n dataList = [sourceTable.get(key) for key in self._sourceKeyTuple]\n\n if self._sourceDTypeList is None:\n self._sourceDTypeList = [(key, arr.dtype)\n for key, arr in itertools.izip(self._sourceKeyTuple, dataList)]\n \n transposedDataList = zip(*dataList)\n del dataList\n\n dataDict.update((srcId, idTuple + tuple(data))\n for srcId, data in itertools.izip(idList, transposedDataList))\n return dataDict", "def preprocess_data_by_mapping(dicts_count, alias_mapping):\n\n new_mapping_dict = {}\n for (key, value) in dicts_count.items():\n\n # Replace the key with lower string\n new_key = unicode.lower(key)\n\n # Replace the key with object alias\n if new_key in alias_mapping:\n new_key = alias_mapping[new_key]\n\n # Check if the object key is already exists in the dict\n if new_key in new_mapping_dict:\n new_mapping_dict[new_key] += value\n else:\n new_mapping_dict[new_key] = value\n\n return new_mapping_dict", "def new_counts_dict():\n\n\tIN_FILES = [\"../_semtag_dataset_webanno_tfidf_inimigo.txt\",\"../_semtag_dataset_webanno_tfidf_publico.txt\" ]\n\n\ttxt = []\n\tfor in_file in IN_FILES:\n\t with codecs.open(in_file,\"r\",\"utf-8\") as fid:\n\t txt += fid.readlines()\n\t#words\n\twords = [w for m in txt for w in m.split()]\n\t#unique words\n\twords = list(set(words))\n\t#word index\n\twrd2idx = {w:-1 for w in words}\n\n\tset_trace()\n\t\n\twith open(COUNTS_DIC,\"w\") as fod:\n\t\tcPickle.dump(wrd2idx, fod, cPickle.HIGHEST_PROTOCOL)", "def aggregate_by_primary_attribute(table):\n result = {}\n for row in table:\n for attribute_to_aggregate_by in row[1].split(','):\n attribute_to_aggregate_by.strip()\n attribute_data = row[0]\n if attribute_to_aggregate_by not in result:\n result[attribute_to_aggregate_by] = [attribute_data]\n else:\n result[attribute_to_aggregate_by] += [attribute_data]\n return result", "def _create_metrics_table(font, format, base, create_glyph_metrics):\n # we don't set PCF_COMPRESSED_METRICS\n metrics = tuple(create_glyph_metrics(_g, base) for _g in font.glyphs)\n table_bytes = (\n 
bytes(le.uint32(format))\n + bytes(base.int32(len(metrics)))\n + b''.join(bytes(_t) for _t in metrics)\n )\n return table_bytes, format", "def calcCountDict(TFdict):\n\n countDict = {}\n\n for doc in TFdict:\n for term in doc:\n if term in countDict:\n countDict[term] +=1\n else:\n countDict[term] = 1\n\n return countDict", "def get_counts_by_manufacturers(table):\n manufacturers_dict = {}\n for item in table:\n try:\n if item[2]:\n try:\n if item[2] in manufacturers_dict.keys():\n manufacturers_dict[item[2]] += 1\n else:\n manufacturers_dict[item[2]] = 1\n except IndexError:\n pass\n else:\n raise ValueError\n except ValueError:\n pass\n\n return manufacturers_dict", "def summarize_metadata(self):\n meta_dict = {}\n for comp in self.dataset.data_vars:\n for mkey, mvalue in self.dataset[comp].attrs.items():\n meta_dict[f\"{comp}.{mkey}\"] = mvalue\n\n return meta_dict", "def prepare_data(groups):\n all_dicts = []\n for idx, group in groups:\n res_dict = {'organism': group.organism.iloc[0]}\n for g_idx, row in group.iterrows():\n if pd.notna(row.label):\n res_dict[row.cmp_name] = {'label': row.label, 'mic': row.MIC}\n else:\n res_dict[row.cmp_name] = {'label': '', 'mic': row.MIC}\n all_dicts.append(res_dict)\n return all_dicts", "def makeTargetFieldsDict(tgt_fields):\n global RES, NRES, HOTEL\n out_dict = {}\n for fld in tgt_fields:\n use, suffix = fld.split(\"_SF_\")\n if use in RES:\n act_field = \"RES\"\n elif use in NRES:\n act_field = \"JOB\"\n elif use in HOTEL:\n act_field = \"HOTEL\"\n else:\n # This is an untracked ause\n continue\n share_field = \"shr_{}\".format(use)\n sqft_field = \"{}_sqft\".format(use)\n out_dict[fld] = (act_field, share_field, sqft_field)\n return out_dict", "def get_tracking_info(self):\n items = [('run_id', b_to_str),\n ('asic_id', b_to_str),\n ('version_name', b_to_str),\n ('asic_temp', float),\n ('heatsink_temp', float),\n ('exp_script_purpose', b_to_str),\n ('flow_cell_id', b_to_str),\n ('device_id', b_to_str),\n ]\n attrs = self['/UniqueGlobalKey/tracking_id'].attrs\n return {key: converter(attrs[key]) for key, converter in items}", "def updateAnnoByIdDictFromMeta(syn,idDict,metaDf,refCol,fileExts):\n for key in idDict:\n print \"updating annotaion values for key: %s\" % key\n for synId in idDict[key]:\n print \"> %s\" %synId\n temp = syn.get(synId, downloadFile = False)\n exts = ')|('.join(fileExts)\n exts = r'(' + exts + ')'\n tempName = re.sub(exts,\"\",temp.name)\n row = df.loc[df[refCol] == tempName]\n temp[key] = map(str,row[key])[0]\n temp = syn.store(temp,forceVersion = False)\n print \"\"", "def make_contingency_tables(\n y: np.ndarray, flagged_A: np.ndarray, flagged_B: np.ndarray\n) -> Dict[int, np.ndarray]:\n\n y = np.array(y).astype(np.int64).flatten()\n flagged_A = np.array(flagged_A).astype(np.bool_).flatten()\n flagged_B = np.array(flagged_B).astype(np.bool_).flatten()\n\n if len(flagged_A) != len(y) or len(flagged_B) != len(y):\n raise ValueError(\n f\"Expected arrays y, flagged_A, and flagged_B of the same length: \\\n got {len(y)}, {len(flagged_A)}, and {len(flagged_B)}.\"\n )\n\n contingency_tables = {}\n for class_id in np.unique(y):\n\n items_flagged_A = flagged_A[y == class_id]\n items_flagged_B = flagged_B[y == class_id]\n\n a = (~items_flagged_A & ~items_flagged_B).sum()\n b = (~items_flagged_A & items_flagged_B).sum()\n c = (items_flagged_A & ~items_flagged_B).sum()\n d = (items_flagged_A & items_flagged_B).sum()\n\n table = np.array([[a, b], [c, d]])\n contingency_tables[class_id] = table\n\n return contingency_tables", "def 
getTF_IDFSpace():\n sids,documents = getSongTextInfo()\n texts = [[word for word in document.lower().split()] for document in documents]\n dictionary = corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(text) for text in texts]\n tfidf = models.TfidfModel(corpus)\n corpus_tfidf = tfidf[corpus]\n songMap = {}\n index = 0\n for doc in corpus_tfidf:\n sid = sids[index]\n rMap = {}\n for item in doc:\n wid = item[0]\n count = item[1]\n rMap[wid] = count\n songMap[sid] = rMap\n index += 1\n return songMap", "def table_summary():\n \n t = dict()\n t['name'] = get_names()\n t['Name'] = [get_properties(name)['label'] for name in t['name']]\n N = len(t['name'])\n \n # host\n t['host'] = ['Sagittarius', 'Sagittarius', 'none', 'Gaia-Sausage-Enceladus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Sequoia / Arjuna', np.nan, np.nan, 'Sequoia / Arjuna', 'Gaia-Sausage-Enceladus', 'Sequoia / Arjuna', 'Helmi / Wukong', 'Helmi / Wukong', 'Sagittarius', 'in situ / Helmi / Wukong', 'Helmi / Wukong', 'Cetus', 'Cetus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Cetus', 'Sequoia / Arjuna / I\\'itoi']\n \n # progenitor\n t['progenitor'] = [np.nan, np.nan, 'itself', 'NGC 5139', 'NGC 4590', np.nan, 'NGC 3201', '(Wukong / Helmi)', '(Wukong / Helmi)', np.nan, np.nan, np.nan, np.nan, 'NGC 5024', np.nan, 'NGC 5272', 'NGC 5024', 'NGC 5824', 'NGC 5824', np.nan, np.nan, np.nan, np.nan]\n \n # progenitor type\n t['type'] = ['DG' if name in ['elqui', 'indus', 'jhelum'] else 'GC' for name in t['name']]\n \n # metallicity\n t['feh'] = [-2.4, -2.4, -2.2, -1.5, -2.16, -2.3, -1.5, -2.1, -2.1, -1.6, -1.95, -1.6, -2.7, np.nan, -1.7, -1.1, -2.7, -1.9, np.nan, np.nan, -2.2, np.nan, -1.9]\n \n # associations\n t['friends'] = ['ATLAS', 'Aliqa Uma', np.nan, np.nan, np.nan, np.nan, np.nan, 'Jhelum', 'Indus', np.nan, np.nan, np.nan, np.nan, 'Sylgr', np.nan, np.nan, 'Ravi', 'Turbio', 'Triangulum', np.nan, np.nan, np.nan, np.nan]\n \n tout = Table(t)\n tout.pprint()\n tout.write('../data/stream_origin.fits', overwrite=True)", "def produce_mirna_single_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.single.bam.mirbase_counts.txt\")", "def coalesce_tables(tables):\n ## For each table, we can:\n ## Process the names in the first row, as column names\n ## If we have a \"quantity\" column, convert this from euro style to float\n\n ## If the column names are the same, we can append one to the other.\n \n proc_tables = OrderedDict()\n most_recent_key = None\n for tn,t in enumerate(tables):\n for i, r in enumerate(t):\n ##print(f\"Table {tn}, Row number {i}\")\n col_accessors = [str(x) for x in range(len(r))]\n ## Get the processed row names\n if i == 0: \n cnames = {}\n for c in col_accessors:\n cnames[c] = r[c].lower().strip().replace(\" \", \"\")\n continue\n ## Now, cnames was defined from iteration i==0\n rec = {}\n for c in col_accessors:\n rec[cnames[c]] = r[c]\n\n fixweight = lambda x: float(x.replace(\",\", \".\"))\n \n \n if 'netweight' in rec.keys():\n if rec['netweight'] is not None:\n rec['netweight'] = fixweight(rec['netweight'])\n\n if rec['no.'] is not None:\n ## new record\n most_recent_key = rec['no.']\n proc_tables[most_recent_key] = rec\n else:\n ## append the description to previous\n if rec['description'] is not None:\n proc_tables[most_recent_key]['description'] = proc_tables[most_recent_key]['description'] + \" \" + rec['description']\n\n\n return(list(proc_tables.values()))", "def build_table():\n with contextlib.ExitStack() as stack:\n files = 
[stack.enter_context(gzip.open(f, 'rt')) for f in sys.argv[1:]]\n iters = [(line.split() for line in f) for f in files]\n for it in iters:\n next(it)\n key = operator.itemgetter(0)\n table = []\n for k, g in itertools.groupby(merge(*iters, key=key), key=key):\n props = list(g)\n if len(props) == len(iters):\n table.append([k] + [x[1] for x in props])\n for snp in table:\n print(*snp)", "def create_op_set_id_version_map(table: VersionTableType) -> VersionMapType:\n result: VersionMapType = {}\n\n def process(release_version: str, ir_version: int, *args: Any) -> None:\n del release_version # Unused\n for pair in zip([\"ai.onnx\", \"ai.onnx.ml\", \"ai.onnx.training\"], args):\n if pair not in result:\n result[pair] = ir_version\n if pair[0] == \"ai.onnx.training\":\n result[\"ai.onnx.preview.training\", pair[1]] = ir_version\n\n for row in table:\n process(*row)\n return result", "def parse_distmat_to_dict(table):\r\n\r\n col_headers, row_headers, data = parse_matrix(table)\r\n assert(col_headers == row_headers)\r\n\r\n result = defaultdict(dict)\r\n for (sample_id_x, row) in zip(col_headers, data):\r\n for (sample_id_y, value) in zip(row_headers, row):\r\n result[sample_id_x][sample_id_y] = value\r\n return result", "def freq_table(a):\n Detail_freq = a.loc[:, (a.dtypes == object) | (a.dtypes == long) ].columns.get_values().tolist()\n print(Detail_freq)\n for freq in Detail_freq:\n df1 = pd.DataFrame(a[freq].value_counts(dropna=False).astype(float).map('{:20,.0f}'.format).sort_index()).rename(columns={freq:'Count'})\n df2 = pd.DataFrame(a[freq].value_counts(normalize = True, dropna=False).map('{:,.2%}'.format).sort_index()).rename(columns={freq:'Percentage'})\n df = pd.concat([df1, df2], axis = 1)\n print(df)", "def tab(data, column) :\r\n\r\n table = {}\r\n \r\n for i in data :\r\n if i[column] not in table :\r\n # add it to tab\r\n table[i[column]] = 1 \r\n else:\r\n # add tabulation\r\n table[i[column]] += 1\r\n\r\n #place the cursor back at 0\r\n \r\n return table", "def dictagfreq2(kind, fname):\n \n #x = ['movie', 'moviecode', 'offset', 'well', 'agmin', 'agsec', 'agdur', \n #'agtype', 'agcomm', 'escmin', 'escsec', 'escdur', 'esctype', 'escbeh', \n #'esccomm', 'gen', 'date', 'assay', 'fps', 'flyid', 'side', 'moviepart']\n\n d = {}\n y = '1'\n b = []\n \n with open(fname) as f:\n for l in f:\n adict = agline2(l)\n #print(l)\n \n if adict['well'] != y:\n agfreqcmd(kind, b, d[gen])\n b = []\n \n if adict['agtype'] != '-':\n b.append(adict['agtype'])\n\n if adict['esctype'] != '':\n b.append(adict['esctype'])\n \n gen = adict['gen'] \n if gen not in d:\n d[gen] = [] \n \n #print('b', b)\n #print('well', adict['well'])\n #print('gen', gen, 'd', d)\n \n y = adict['well']\n \n agfreqcmd(kind, b, d[gen])\n \n return(d)", "def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict", "def calcTFIDFDict(TFDict, IDFDict):\n\n TFIDFDict = {}\n\n #for each term in the document, multiply the tf and idf values\n\n for term in TFDict:\n TFIDFDict[term] = TFDict[term] * IDFDict[term]\n\n return TFIDFDict", "def produce_mirna_allbest_counts_table(count_files, table_file):\n merge_count_tables(count_files, 
table_file, \".dedup.bam.mirbase_counts.txt\")", "def print_tables(hash_table, f_output, l_samples):\n\n l_fields = ['chr', 'pos', 'ref', 'alt', 'QUAL', 'FILTER',\n 'Func.refGene', 'Gene.refGene', 'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene',\n 'cytoBand', 'ExAC_ALL', 'ExAC_AFR', 'ExAC_AMR', 'ExAC_EAS', 'ExAC_FIN', 'ExAC_NFE', 'ExAC_OTH',\n 'ExAC_SAS',\n 'avsnp147', 'SIFT_score', 'SIFT_pred', 'Polyphen2_HDIV_score', 'Polyphen2_HDIV_pred',\n 'Polyphen2_HVAR_score',\n 'Polyphen2_HVAR_pred', 'LRT_score', 'LRT_pred', 'MutationTaster_score', 'MutationTaster_pred',\n 'MutationAssessor_score', 'MutationAssessor_pred', 'FATHMM_score', 'FATHMM_pred', 'PROVEAN_score',\n 'PROVEAN_pred', 'VEST3_score', 'CADD_raw', 'CADD_phred', 'DANN_score', 'fathmm-MKL_coding_score',\n 'fathmm-MKL_coding_pred', 'MetaSVM_score', 'MetaSVM_pred', 'MetaLR_score', 'MetaLR_pred',\n 'integrated_fitCons_score', 'integrated_confidence_value', 'GERP++_RS', 'phyloP7way_vertebrate',\n 'phyloP20way_mammalian', 'phastCons7way_vertebrate', 'phastCons20way_mammalian', 'SiPhy_29way_logOdds']\n l_fields = l_fields + l_samples\n \n l_chr = set([item[0] for item in hash_table.keys()])\n\n fo = open(f_output, 'w')\n fo.write(','.join(l_fields) + '\\n')\n for key in sorted(hash_table.keys(), key=itemgetter(1)):\n fo.write(','.join(map(lambda field: hash_table[key].get(field, '.'), l_fields)) + '\\n')\n fo.close()", "def compute_helper_mempool_dictionaries():\n txn_density_dict = {}\n txn_parents_dict = {}\n txn_size_dict = {}\n mempool_data = parse_mempool_csv()\n for elem in mempool_data:\n size = elem.weight/MAXIMUM_BLOCK_WEIGHT # weight mapped to (0,1)\n txn_size_dict[elem.txid] = size \n txn_density_dict[elem.txid] = elem.fee/size\n if elem.parents != '':\n txn_parents_dict[elem.txid] = elem.parents.strip().split(';')\n return txn_density_dict,txn_parents_dict,txn_size_dict", "def calcIDFDict(countDict, numfiles):\n\n IDFDict = {}\n for term in countDict:\n IDFDict[term] = math.log(numfiles / countDict[term])\n\n return IDFDict", "def define_info_dict():\n\n d = {\n \"PRED\": {\n \"COLUMN\": [\"predicted_class\"],\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"Predicted class: somatic, germline, artifact\",\n },\n \"PROB\": {\n \"COLUMN\": [\"prob_s\", \"prob_g\", \"prob_a\"],\n \"Number\": \"3\",\n \"Type\": \"Float\",\n \"Description\": \"Prediction probability of \"\n \"being somatic, germline, artifact in this order\",\n },\n \"SNP\": {\n \"COLUMN\": [\"is_on_db\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Present on SNP database (modified dbSNP/gnomAD (default) or user-provided database)\",\n },\n \"ANNO\": {\n \"COLUMN\": [\"annotation\"],\n \"Number\": \".\",\n \"Type\": \"String\",\n \"Description\": \"Indel annotation formatted as \"\n \"GeneSymbol|RefSeqAccession|CodonPos|IndelEffect\"\n \"Delimited by comma for multiple isoforms\",\n },\n \"COSMIC_CNT\": {\n \"COLUMN\": [\"cosmic_cnt\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"COSMIC count in v89\",\n },\n \"MAXMAF\": {\n \"COLUMN\": [\"max_maf\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Maximum minor allele frequency (MAF) \"\n \"reported in dbSNP, ClinVar and gnomAD non-cancer population\",\n },\n \"COMMON\": {\n \"COLUMN\": [\"is_common\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Common in dbSNP or MAXMAF > 0.01\",\n },\n \"CLIN\": {\n \"COLUMN\": [\"clin_info\"],\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"ClinVar 
annotation formatted as ClinicalSignificance|Condition\",\n },\n \"ICP\": {\n \"COLUMN\": [\"indel_complexity\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Indel complexity: mismatches around the indel measured by edit distance\",\n },\n \"DSM\": {\n \"COLUMN\": [\"dissimilarity\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Dissimilarity: edit distance between indel and flanking sequences\",\n },\n \"ISZ\": {\n \"COLUMN\": [\"indel_size\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Indel size\",\n },\n \"REP\": {\n \"COLUMN\": [\"repeat\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Repeat: count of the indel-sequence repeats in flanking region\",\n },\n \"UQM\": {\n \"COLUMN\": [\"is_uniq_mapped\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Supported by uniquely mapped reads\",\n },\n \"NEB\": {\n \"COLUMN\": [\"is_near_boundary\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Near exon boundary\",\n },\n \"EQX\": {\n \"COLUMN\": [\"equivalence_exists\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Equivalent alignments exist for the indel\",\n },\n \"BID\": {\n \"COLUMN\": [\"is_bidirectional\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Supported by forward and reverse reads\",\n },\n \"MTA\": {\n \"COLUMN\": [\"is_multiallelic\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Multialleleic\",\n },\n \"FRM\": {\n \"COLUMN\": [\"is_inframe\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"In-frame indel\",\n },\n \"SPL\": {\n \"COLUMN\": [\"is_splice\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Located in splice region\",\n },\n \"TRN\": {\n \"COLUMN\": [\"is_truncating\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Truncating indel\",\n },\n \"CDD\": {\n \"COLUMN\": [\"is_in_cdd\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Located in conserved domain\",\n },\n \"LOC\": {\n \"COLUMN\": [\"indel_location\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Relative indel location within the transcript coding region\",\n },\n \"NMD\": {\n \"COLUMN\": [\"is_nmd_insensitive\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Insensitive to nonsense mediated decay\",\n },\n \"IPG\": {\n \"COLUMN\": [\"ipg\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Indels per gene\",\n },\n \"LEN\": {\n \"COLUMN\": [\"cds_length\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Coding sequence length. 
Median value if multiple isoforms exist\",\n },\n \"LC\": {\n \"COLUMN\": [\"lc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Linguistic complexity: diversity of k-mers in flanking 50-bp region\",\n },\n \"LLC\": {\n \"COLUMN\": [\"local_lc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Local linguistic complexity: diversity of k-mers in flanking 6-bp region\",\n },\n \"GC\": {\n \"COLUMN\": [\"gc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"GC-content in flanking 50-bp region\",\n },\n \"LGC\": {\n \"COLUMN\": [\"local_gc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Local GC-content in flanking 6-bp region\",\n },\n \"SG\": {\n \"COLUMN\": [\"strength\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"DNA bond strength of 2-mers in flanking 50-bp region\",\n },\n \"LSG\": {\n \"COLUMN\": [\"local_strength\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Local DNA bond strength of 2-mers in flanking 6-bp region\",\n },\n \"INS\": {\n \"COLUMN\": [\"is_ins\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Insertion\",\n },\n \"ATI\": {\n \"COLUMN\": [\"is_at_ins\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single insertion of A or T\",\n },\n \"ATD\": {\n \"COLUMN\": [\"is_at_del\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single deletion of A or T\",\n },\n \"GCI\": {\n \"COLUMN\": [\"is_gc_ins\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single insertion of G or C\",\n },\n \"GCD\": {\n \"COLUMN\": [\"is_gc_del\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single deletion of G or C\",\n },\n \"ALTC\": {\n \"COLUMN\": [\"alt_count\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Alt count: count of unique reads supporting ALT allele\",\n },\n \"REFC\": {\n \"COLUMN\": [\"ref_count\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Ref count: count of unique reads supporting REF allele\",\n },\n \"RCF\": {\n \"COLUMN\": [\"reclassified\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Reclassification applied\",\n },\n \"RQB\": {\n \"COLUMN\": [\"filtered\", \"rescued\"],\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"Indel used to rescue this entry formatted as CHROM:POS:REF:ALT\",\n },\n }\n\n return d", "def get_mapping_table_ids():\n #\n aliquot_prefix = get_prefix(API_PARAMS, API_PARAMS['ALIQUOT_ENDPOINT'])\n case_aliquot_table_name = construct_table_name(API_PARAMS, prefix=aliquot_prefix)\n case_aliquot_table_id = f\"{BQ_PARAMS['DEV_PROJECT']}.{BQ_PARAMS['META_DATASET']}.{case_aliquot_table_name}\"\n\n study_table_name = construct_table_name(API_PARAMS, prefix=get_prefix(API_PARAMS, API_PARAMS['STUDY_ENDPOINT']))\n study_table_id = f\"{BQ_PARAMS['DEV_PROJECT']}.{BQ_PARAMS['META_DATASET']}.{study_table_name}\"\n\n case_external_mapping_prefix = get_prefix(API_PARAMS, API_PARAMS['CASE_EXTERNAL_MAP_ENDPOINT'])\n case_ext_map_table_name = construct_table_name(API_PARAMS, prefix=case_external_mapping_prefix)\n case_external_mapping_table_id = f\"{BQ_PARAMS['DEV_PROJECT']}.{BQ_PARAMS['META_DATASET']}.{case_ext_map_table_name}\"\n\n return case_aliquot_table_id, study_table_id, case_external_mapping_table_id", "def _prepare_links_table(self):\n\n links_tbl = OrderedDict()\n for colname in itertools.islice(self._pinfos, 1, None):\n links_tbl[colname] = {}\n 
links_tbl[colname][\"name\"] = f\"{colname}\"\n fname = colname.replace(\"%\", \"_pcnt\") + \".html\"\n links_tbl[colname][\"fname\"] = fname\n links_tbl[colname][\"hlink\"] = f\"<a href=\\\"{fname}\\\">{colname}</a>\"\n\n return links_tbl", "def parse_mixcr_table(filepath):\n tbl = pd.read_csv(filepath, sep='\\t')\n out = {}\n for _, row in tbl.iterrows():\n motifs = get_binding_motifs(row['aaSeqImputedCDR3'])\n for kind, motif_counts in motifs.items():\n for motif, count in motif_counts.items():\n for mykind in [kind, 'all_types']:\n key = (mykind, motif)\n if key not in out:\n out[key] = {\n 'num_unique_seqs': 0,\n 'num_clones': 0,\n 'num_unique_occurences': 0,\n 'num_clonal_occurences': 0,\n }\n out[key]['num_unique_seqs'] += 1\n out[key]['num_clones'] += row['cloneCount']\n out[key]['num_unique_occurences'] += count\n out[key]['num_clonal_occurences'] += count * row['cloneCount']\n return out", "def tr_nc_dict(dfin):\n\n\ttr_nc_index_dict = OrderedDict()\n\t\n\ttrCount = 0\n\tpreviousTrIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_tr=\"\"\n\n\tfor i in range(len(dfin)):\n# print dfin.loc[i]\n\n\t\tif dfin.loc[i,'feature'] == 'transcript':\n\t\t\ttrdict = parse_mod_entry(dfin.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\n\t\t\tif trCount != 0:\n\t\t\t\tnewTrIndex = i\n\t\t\t\ttr_nc_index_dict[current_id] = [previousTrIndex,newTrIndex]\n\t\t\t\tpreviousTrIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\ttrCount += 1\n\n\t\t\telse:\n\t\t\t\tnewTrIndex = 0\n\t\t\t\ttrCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\t\n\t\tif i == (len(dfin)-1):\n\t\t\tnewTrIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\ttr_nc_index_dict[current_id] = [previousTrIndex,newTrIndex]\n\t\t\t\n\treturn tr_nc_index_dict", "def _make_metric_dict(self, metrics):\n exon = None\n hgvs_c = None\n\n slen = len(metrics.split(':'))\n if slen == 1:\n if \"(dist=\" not in metrics:\n tx = metrics\n else:\n tx = None\n elif slen == 2:\n tx = metrics.split('(')[0]\n hgvs_c = metrics.split(':')[1].rstrip(')')\n elif slen == 3:\n tx = metrics.split('(')[0]\n exon = metrics.split(':')[1][4:]\n hgvs_c = metrics.split(':')[2].rstrip(')')\n else:\n raise UserWarning(\"There are an incorrect number of fields listed in: \" + str(self.info))\n\n if hgvs_c:\n full_c = ':'.join([tx, hgvs_c])\n hgvs_parser = Hgvs(full_c)\n hgvs_basep = hgvs_parser.start\n else:\n hgvs_basep = None\n\n return {\n 'AAP': None,\n 'BASEP': hgvs_basep,\n 'EXON': exon,\n 'HGNC': None,\n 'HGVSC': hgvs_c,\n 'HGVSP1': None,\n 'HGVSP3': None,\n 'SOURCE': self.tfx_type,\n 'SPLICE': self.splicing,\n 'TXC': tx,\n 'PVT': self.vtype,\n 'VFX': None\n }", "def __generate_dict(self, test_output_dir, types, _table):\n # dump json from file to dict\n\n table_dict = CaseDump(os.path.join(test_output_dir, _table + OUT_SUFFIX), CASE_FILE_TYPE_JSON).dump()\n\n # foreach key from statistic report, split it into table/type/key/sub_value\n type_dict = {}\n for _key in table_dict.keys():\n # wipe off table\n _key = _key.replace(\"%s_\" % _table, \"\")\n for _type in types:\n # wipe off type\n if _type and _type in _key:\n if _type not in type_dict.keys():\n type_dict[_type] = {}\n _key = _key.replace(\"%s_\" % _type, \"\")\n # if has sub_value, set RESULT_DICT[table][type][key] = sub_value\n # else set RESULT_DICT[table][type][key] = None\n if \"#\" in _key:\n sub_value = _key.split(\"#\")\n type_dict[_type][sub_value[0]] = sub_value[1]\n else:\n type_dict[_type][_key] = \"\"\n break\n\n global RESULT_DICT\n 
RESULT_DICT[_table] = type_dict", "def buildAdsTable_v1(output_file = None):\r\n ads_table = []\r\n text_props = ['readability_text', '_all']\r\n onto_props_with_mapping = {'phone':['telephone.name', 'telephone.name.raw'], 'email': ['email.name', 'email.name.raw'],\r\n 'posting_date':['inferlink_date', 'readability_date', 'high_recall_readability_date'],\r\n 'price':['price'], 'location':['addressLocality'],\r\n 'name':['name'],\r\n 'ethnicity':['ethnicity'],\r\n 'eye_color':['eyeColor'], 'title':['title'],\r\n 'hair_color':['hairColor'], 'nationality':['nationality'],\r\n 'business_type':['business_type'],\r\n 'business_name':['streetAddress'], 'services':['serviceType'],\r\n 'business': ['streetAddress'],\r\n 'physical_address': ['streetAddress'],\r\n 'gender':['gender'], 'top_level_domain':['top_level_domain'],\r\n 'obfuscation':['telephone.isObfuscated', 'email.isObfuscated'],\r\n 'age':['age'], 'hyperlink:':['relatedLink'], 'drug_use':['drug_use'],\r\n 'review_site':['review_site'], 'review_id':['review_id'],\r\n 'number_of_individuals':['name_count'],\r\n 'ad': ['identifier'],\r\n 'multiple_phone': ['telephone_count'],\r\n 'cluster': ['seller.uri'],\r\n 'seed': ['seller.telephone.name', 'seller.email.name']\r\n }\r\n non_readability_props = ['number_of_individuals', 'ad', 'multiple_phone', 'cluster', 'phone', 'posting_date', 'email']\r\n onto_props_without_mapping = ['image_with_email', 'image_with_phone']\r\n for property, value_list in onto_props_with_mapping.iteritems():\r\n dict = {}\r\n dict['onto_prop'] = property\r\n mappings = []\r\n tmp = {}\r\n for v in value_list:\r\n if property == 'phone' or v == 'seller.telephone.name':\r\n tmp[v] = 'build_phone_match_clause'\r\n tmp['_all'] = 'build_phone_match_clause'\r\n tmp['url'] = 'build_phone_regexp_clause'\r\n elif v == 'email.name':\r\n tmp[v] = 'build_email_match_clause'\r\n tmp['_all'] = 'build_match_phrase_clause'\r\n elif property == 'ad':\r\n tmp[v] = 'build_term_clause'\r\n elif '_count' in v:\r\n tmp[v] = 'build_count_match_clause'\r\n elif property == 'gender':\r\n tmp[v] = 'build_gender_match_clause'\r\n elif property == 'posting_date':\r\n tmp[v] = 'build_match_phrase_clause'\r\n else:\r\n tmp[v] = 'build_match_clause'\r\n if property not in non_readability_props:\r\n for v in text_props: # will overwrite for seller.telephone.name\r\n tmp[v] = 'build_match_clause_inner'\r\n mappings.append(tmp)\r\n dict['mappings'] = mappings\r\n ads_table.append(dict)\r\n\r\n for property in onto_props_without_mapping:\r\n dict = {}\r\n dict['onto_prop'] = property\r\n mappings = []\r\n tmp = {}\r\n for v in text_props:\r\n tmp[v] = 'build_match_clause_inner'\r\n mappings.append(tmp)\r\n dict['mappings'] = mappings\r\n ads_table.append(dict)\r\n\r\n if output_file:\r\n file = codecs.open(output_file, 'w', 'utf-8')\r\n for entry in ads_table:\r\n json.dump(entry, file)\r\n file.write('\\n')\r\n file.close()", "def generateAggregatedCsvData(self, context, obj, entities):\n return sum([long(e.prop1.replace('-', ''), 16) for e in entities])", "def _parse_id_to_taxonomy_file(f):\r\n result = {}\r\n for line in f:\r\n line = line.strip()\r\n if line:\r\n identifier, taxonomy = map(strip, line.split('\\t'))\r\n result[identifier] = taxonomy\r\n return result", "def write_count_table(filtered_counts, gene_names, ids_liver_header):\n with open(\"filtered_tcga_counts.tsv\", \"w\") as file:\n file.write(\"gene_id\\tgene_name\\t\" + \"\\t\".join(ids_liver_header) + \"\\n\")\n for gene_name, counts_line in zip(gene_names, filtered_counts):\n 
file.write(gene_name + \"\\t\" + \"\\t\" + \"\\t\".join(counts_line) + \"\\n\")", "def get_table(text, size = 1):\r\n result = {}\r\n for i in range(len(text)):\r\n chars = text[i:i+size]\r\n try:\r\n out = text[i + size]\r\n except IndexError:\r\n break\r\n char_dict = result.get(chars, {})\r\n if out not in char_dict:\r\n char_dict[out] = 0\r\n char_dict[out] += 1\r\n result[chars] = char_dict\r\n return result", "def process_ubern(fdict,cdmd,cdm,fn,proflist,campaigns,data):\n good=False\n for d in data:\n if 'Min' in d:\n good=True\n if 'ri_name' in d:\n key=d.split()[-1]\n if key not in fdict.keys():\n fdict[key]={'observations_table':{},'header_table':{}}\n \n if 'COMP018' in fn:\n print('018')\n if not good:\n print(fn,'could not be processed')\n return\n if(not fdict[key]['observations_table']):\n \n for k in cdmd['observations_table'].element_name.values:\n fdict[key]['observations_table'][k]=[]\n for k in cdmd['header_table'].element_name.values:\n fdict[key]['header_table'][k]=[]\n refdate=datetime(year=1900,month=1,day=1)\n header=True\n aux={}\n l=0\n comp=fn[:7]\n for d in data:\n if 'Min' in d:\n head=d.split()\n break\n l+=1\n m=0\n for d in data[l+1:]:\n dlist=d.split()\n if dlist:\n if dlist[0]=='NA':\n continue\n #fdict[key]['observations_table']['date_time']+=ddlen*[fdict[key]['header_table']['report_timestamp'][-1]]\n \n try:\n x=float(dlist[1])*100\n except ValueError:\n try: \n if 'PPPcorr' in head:\n x=float(dlist[2])*100\n else:\n continue\n except ValueError:\n continue\n m+=1\n \n if m==0:\n print(fn,'no valid obs')\n return\n \n try:\n idx=np.where(cdm['iri_rstype_map'].ri_name==key.split('_')[0])[0][0]\n fdict[key]['sensor_id']=numpy.string_(cdm['iri_rstype_map'].vapor_name[idx]) #.split(b',')[0])\n except:\n fdict[key]['sensor_id']=numpy.string_('NA')\n \n l=0\n for d in data:\n if 'ri_name' in d:\n key=d.split()[1]\n print(comp,fn,key)\n if 'measurement_programme' not in fdict[key]['header_table'].keys():\n fdict[key]['header_table']['measurement_programme']=[]\n idx=campaigns.index[campaigns.ID==comp]\n fdict[key]['header_table']['measurement_programme'].append(campaigns.Name[idx].values[0]) \n\n id_ascent=fn.split('/')[-1][:11]\n #key=np.string_('0-20100-0-0000'+fn[-8])\n sidx=np.where(cdm['station_configuration'].measuring_system_id==numpy.string_(key))[0][0]\n pidx=np.where(proflist['id_ascent']==id_ascent)[0][0]\n fdict[key]['header_table']['primary_station_id'].append(cdm['station_configuration'].primary_id.values[sidx])\n fdict[key]['header_table']['report_id'].append(fdict[key]['header_table']['primary_station_id'][-1]+\n b'-'+numpy.string_(fn.split('_')[-2]))\n print(key,fdict[key]['header_table']['report_id'])\n fdict[key]['header_table']['latitude'].append(cdm['station_configuration'].latitude.values[sidx])\n fdict[key]['header_table']['longitude'].append(cdm['station_configuration'].longitude.values[sidx])\n fdict[key]['header_table']['height_of_station_above_sea_level'].append(\n proflist['alt'].values[pidx])\n\n for k in 'latitude','longitude':\n aux[k]=fdict[key]['header_table'][k][-1]\n\n if 'date ' in d:\n dat=d.split()[-1]\n if 'hour_utc' in d:\n hour=d.split()[-1]\n hh=hour.split(':')\n if len(hh)==2:\n hour=hour+':00'\n relset=datetime.strptime(dat+' '+hour,'%Y-%m-%d %H:%M:%S')\n fdict[key]['header_table']['report_timestamp'].append(int((relset-refdate).total_seconds()))\n fdict[key]['header_table']['record_timestamp'].append(int((relset-refdate).total_seconds()))\n if 'Min' in d:\n head=d.split()\n break\n l+=1\n\n m=0\n dlen=len(head)-2\n 
ddlen=dlen\n dstart=2\n if 'Notes' in head:\n dlen-=1\n ddlen-=1\n if 'PPPcorr' in head:\n dstart+=1\n ddlen-=2\n ovar={'TTT':85,'TTTcorr':85,'UU':38,'H':117,'DEWPT':36,'DIR':106,'WSPEED':107}\n offset={'TTT':273.15,'TTTcorr':273.15,'UU':0,'H':0,'DEWPT':273.15,'DIR':0,'WSPEED':0}\n scale={'TTT':1,'TTTcorr':1,'UU':1,'H':9.80655,'DEWPT':1,'DIR':1,'WSPEED':1}\n units={'TTT':5,'TTTcorr':5,'UU':0,'H':806,'DEWPT':5,'DIR':110,'WSPEED':731}\n for d in data[l+1:]:\n dlist=d.split()\n if dlist:\n if dlist[0]=='NA':\n continue\n #fdict[key]['observations_table']['date_time']+=ddlen*[fdict[key]['header_table']['report_timestamp'][-1]]\n \n try:\n fdict[key]['observations_table']['z_coordinate']+=ddlen*[float(dlist[1])*100]\n except ValueError:\n try: \n if 'PPPcorr' in head:\n fdict[key]['observations_table']['z_coordinate']+=ddlen*[float(dlist[2])*100]\n else:\n continue\n except ValueError:\n continue\n #fdict[key]['observations_table']['z_coordinate']+=ddlen*[numpy.nan]\n \n fdict[key]['observations_table']['date_time']+=ddlen*[fdict[key]['header_table']['report_timestamp'][-1]+int(float(dlist[0])*60)]\n if ddlen<len(head[dstart:dlen+2]):\n fdict[key]['observations_table']['observed_variable']+=[ovar[i] for i in head[dstart+1:dlen+2]]\n fdict[key]['observations_table']['units']+=[units[i] for i in head[dstart+1:dlen+2]]\n else:\n fdict[key]['observations_table']['observed_variable']+=[ovar[i] for i in head[dstart:dlen+2]]\n fdict[key]['observations_table']['units']+=[units[i] for i in head[dstart:dlen+2]]\n for i in range(dstart,dlen+2):\n \n if head[i]!='TTTcorr': \n try:\n fdict[key]['observations_table']['observation_value'].append(offset[head[i]]+scale[head[i]]*float(dlist[i]))\n except ValueError:\n fdict[key]['observations_table']['observation_value'].append(numpy.nan)\n else:\n try:\n fdict[key]['observations_table']['observation_value'][-1]=offset[head[i]]+scale[head[i]]*float(dlist[i])\n except ValueError:\n pass\n \n fdict[key]['observations_table']['observation_id']+=[numpy.string_('{:0>8}'.format(m+i+1)) for i in range(ddlen)]\n m+=ddlen\n else:\n break\n\n aux['sensor_id']='NA'\n try:\n idx=np.where(cdm['iri_rstype_map'].ri_name==key)[0][0]\n aux['sensor_id']=cdm['iri_rstype_map'].vapor_name[idx]\n except:\n print('could not find rs type key for '+key+'!')\n pass\n aux['report_id']=fdict[key]['header_table']['report_id'][-1]\n\n for k,v in aux.items():\n fdict[key]['observations_table'][k]+=[v]*m\n \n return", "def cars_dict_to_table(car_data):\n table_data = [[\"ID\", \"Car\", \"Price\", \"Total Sales\"]]\n for item in car_data:\n table_data.append([item[\"id\"], format_car(item[\"car\"]), item[\"price\"], item[\"total_sales\"]])\n return table_data", "def convert_trsp_index(geneDictNonCoding, df, TR_index_dict):\n\n\n\tgeneDictCanon = OrderedDict()\n\t\n\tfor gene in geneDictNonCoding:\n\t\ttrDF = df.iloc[geneDictNonCoding[gene][0]:geneDictNonCoding[gene][1]]\n\t\ttrDFz = trDF.reset_index(drop=True)\n\t\t\n\t\ttrCount = 0\n\t\ttrDictLoc = OrderedDict()\n\t\t\n\t\tfor i in range(len(trDFz)):\n\t\t\tif trDFz.loc[i, 'feature'] == 'transcript':\n\t\t\t\ttr = trDFz.loc[i, 'transcript_id']\n\t\t\t\ttrdict = parse_entry(tr)\n\t\t\t\ttrName = trdict['transcript_id'][0]\n\t\t\t\ttrDictLoc[trName] = [trDFz.loc[i, 'chromStart'], trDFz.loc[i, 'chromEnd']]\n\t\t\t\ttrCount += 1\n\t\t\n\t\tif trCount > 1:\n# print gene, \"more than 1 trsp !!! 
\\n\"\n\t\t\t\n\t\t\trangeDict = OrderedDict() ## store the ranges, and take the longest\n\t\t\tfor key in trDictLoc:\n\t\t\t\ttrRange = len(range(int(trDictLoc[key][0]),int(trDictLoc[key][1])))\n\t\t\t\trangeDict[key] = trRange\n\t\t\t\t\n\t\t\tv=list(rangeDict.values())\n\t\t\tk=list(rangeDict.keys())\n\t\t\ttrOut = k[v.index(max(v))]\n# print trOut\n\t\t\tgeneDictCanon[trOut] = [gene, TR_index_dict[trOut]]\n\t\t\t\n\t\t\t\n\n\t\telse: ## for genes with single transcripts\n\t\t\ttrOut = trDictLoc.keys()[0]\n\t\t\tgeneDictCanon[trOut] = [gene, TR_index_dict[trOut]]\n\treturn geneDictCanon", "def get_results(self):\n d = {}\n# r = {}\n for analyser in self.xml_tree.getroot():\n for child in analyser:\n if child.tag == 'all-records':\n for record in child:\n attributes = record.attrib\n sample = attributes['sampleId']\n assay_id = attributes['assayId']\n genotype = attributes['genotypeId']\n quality = attributes['description'].split('.')[0]\n if re.match(r'rs\\d+', assay_id):\n if sample in d:\n if assay_id in d[sample]:\n for allele in list(genotype):\n if allele not in d[sample][assay_id]['genotype']:\n d[sample][assay_id]['genotype'] += allele\n if quality not in d[sample][assay_id]['quality']:\n d[sample][assay_id]['quality'].append(quality)\n else:\n d[sample][assay_id] = {'genotype': genotype, 'quality': [quality]}\n else:\n d[sample] = {assay_id: {'genotype': genotype, 'quality': [quality]}}\n# if sample in r:\n# if assay_id in r[sample]:\n# for allele in list(genotype):\n# if allele not in r[sample][assay_id]:\n# r[sample][assay_id] += allele\n# else:\n# r[sample][assay_id] = genotype\n# else:\n# r[sample] = {assay_id: genotype}\n# for k, v in r.items():\n# for k1, v1, in v.items():\n# if len(v1) == 1:\n# v[k1] += v1\n# pprint.pprint(r)\n# df = pd.DataFrame.from_dict(r).transpose()\n# print(df)\n# df.to_excel('snpcheck.xlsx')\n return d", "def idf_dict1(l):\n idf = {}\n # first look for idf in abstract field\n solrdict = list_to_dict(l)\n for doc in solrdict.itervalues():\n terms = doc.get('abstract',{})\n for w,t in terms.iteritems():\n if not idf.has_key(w):\n idf[w] = t['tf-idf'][0] / t['tf'][0]\n # next add idf values from title field\n for doc in solrdict.itervalues():\n terms = doc.get('title',{})\n for w,t in terms.iteritems():\n if not idf.has_key(w):\n idf[w] = t['tf-idf'][0] / t['tf'][0]\n return idf", "def compile_as_dict(self, data):\r\n fut = {}\r\n index = self.primary_key.index\r\n for i in data:\r\n popped = i.pop(index)\r\n head = self.table_headers.copy()\r\n head.pop(index)\r\n s = {}\r\n for j in range(len(head)):\r\n s[j] = i[j]\r\n fut[popped] = s\r\n return fut", "def table_to_dict(self, tab):\n dict = {}\n for colname in tab.colnames:\n dict[colname] = tab[colname].data\n return dict", "def analysis():\r\n data_frame = load_from_mysql('core', 'BDFMHQAA_D')\r\n data_frame.registerTempTable('business')\r\n gd = data_frame.select('AA03CSNO', 'AA08PRON')\r\n\r\n def merge_count(a, b):\r\n r = {}\r\n for p, c in a.items():\r\n if p in r:\r\n r[p] += c\r\n else:\r\n r[p] = c\r\n for p, c in b.items():\r\n if p in r:\r\n r[p] += c\r\n else:\r\n r[p] = c\r\n return r\r\n result = gd.map(lambda row: (row.AA03CSNO, {row.AA08PRON: 1})).reduceByKey(merge_count)\r\n pron_count = gd.map(lambda row: (row.AA08PRON, 1)).reduceByKey(lambda a, b: a + b)\r\n\r\n # result = gd.map(lambda row: (row.AA03CSNO, row.AA08PRON))\r\n print(result.take(10))\r\n print('----------------pron count-----------------')\r\n print(pron_count.collect())\r\n\r\n print(gd)" ]
[ "0.7227093", "0.64672595", "0.63795495", "0.5744344", "0.5535398", "0.5517264", "0.54640967", "0.542029", "0.53676605", "0.5360893", "0.5334983", "0.5295043", "0.5288153", "0.5281474", "0.52813345", "0.5253252", "0.52377903", "0.5232528", "0.51988035", "0.51941615", "0.5179884", "0.5179878", "0.5159318", "0.5158393", "0.51572585", "0.5156911", "0.5146848", "0.5139303", "0.51163846", "0.5108249", "0.5093531", "0.50800335", "0.5056124", "0.50550383", "0.5039629", "0.503599", "0.50350326", "0.5017609", "0.5017149", "0.500882", "0.50071186", "0.50038755", "0.50005776", "0.49958292", "0.49896255", "0.4985664", "0.4982965", "0.49818128", "0.49799496", "0.49785832", "0.49779838", "0.49779204", "0.4973262", "0.4967225", "0.49669284", "0.4966263", "0.4966084", "0.49584803", "0.49557778", "0.49387944", "0.4938789", "0.49368963", "0.49345854", "0.49258658", "0.49239764", "0.49137437", "0.4913335", "0.4912436", "0.49099764", "0.49072957", "0.48968863", "0.48954186", "0.48935357", "0.48927847", "0.4890293", "0.48874718", "0.48845693", "0.4881233", "0.4877495", "0.48760763", "0.4873524", "0.48719233", "0.48699364", "0.4867528", "0.48657075", "0.4861294", "0.48609498", "0.48596883", "0.48563698", "0.4856012", "0.48548603", "0.48546404", "0.48500413", "0.48497498", "0.48342848", "0.48327482", "0.4832731", "0.4829764", "0.48294696", "0.48133487" ]
0.63274735
3
formats raw string input into their appropriate values
def format_countOrFPKMTable(self,fpkmTracking_I): for fpkmTracking in fpkmTracking_I: for k,v in fpkmTracking.items(): if k=='tracking_id' and type(fpkmTracking['tracking_id'])==type('string'): pass; elif k!='tracking_id' and type(fpkmTracking[k])==type('string'): fpkmTracking[k] = eval(v); return fpkmTracking_I;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_raw_input(user_input):\n # Replace silly “ or ” characters with \"\n # TODO: Swap out with regex\n raw_input = user_input.strip().replace(\n '“', '\"').replace(\"”\", '\"').replace(\",\", \"\").replace(\"\\n\", \" \")\n # Break apart the string into each coordinate\n raw_inputs = [r.replace('\"', '') for r in raw_input.split('\" \"')]\n # Return coordinates as lists of ints.\n return [[int(i) for i in r.split(\" \")] for r in raw_inputs]", "def input_parser(input_string: str) -> str: \n if is_int(input_string):\n return input_string\n #he is int, give back plz.\n else:\n try:\n modified_input: str = input_string.strip()\n\n evaluatable_pairs: str = regex_splitter(modified_input)\n\n while not (is_int(evaluatable_pairs)):\n evaluatable_pairs = regex_splitter(evaluatable_pairs)\n\n return (evaluatable_pairs)\n\n except:\n raise Exception(\"Invalid Input\")", "def __call__(self, value): # noqa: D102\n if not isinstance(value, str):\n raise ValueError(f\"Input value must be a string. '{value}' is not.\")\n\n raw_value = value\n for c in self.remove_characters:\n value = value.replace(c, \"\")\n if not bool(re.match(f\"^[{self.allowed_characters},]+$\", value)):\n raise ValueError(\n f\"Input must only contain values '{self.allowed_characters},'. '{raw_value}' does not.\"\n )\n if not bool(\n re.match(\n f\"^([{self.allowed_characters}],)+[{self.allowed_characters}]$\", value\n )\n ):\n raise ValueError(\n f\"Input must have format '(?,?,?,?)'. '{raw_value}' does not.\"\n )\n if not all([c in value for c in self.required_characters]):\n raise ValueError(\n f\"Input must contain {self.required_characters}. '{raw_value}' does not.\"\n )\n return raw_value", "def parse(s):\n return s", "def get_processed_string(self, input_string):\n if input_string[:6] == '[sic]\"':\n return input_string[6: -1]\n else:\n return input_string.format(**self)", "def parse_string(self, data):\n pass", "def parse(self,value):\r\n\t\treturn str(value)", "def _parse_user_input(self):\n user_input = self.user_input.strip()\n if user_input:\n if user_input.find(',') > -1:\n # Location is either city/state or latitude/longitude.\n if user_input[0].isalpha():\n # City, state (lat/long handled elsewhere)\n city, state = [x.strip() for x in user_input.split(',')]\n self.city = city\n self.state = state\n elif (len(user_input) <= 10 and\n user_input[1].isdigit()): # 2nd char in US/Can. 
postal codes\n # Postal code\n self.postal_code = user_input.strip()", "def _sanitize_string(self, string):\n # get the type of a unicode string\n unicode_type = type(Pyasciigraph._u('t'))\n input_type = type(string)\n if input_type is str:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = string\n elif input_type is unicode_type:\n info = string\n elif input_type is int or input_type is float:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = str(string)\n else:\n info = str(string)\n return info", "def __normalize(self, ctx: commands.Context, format: str) -> str:\n\t\t# convert to lowercase\n\t\tlower_format = format.lower()\n\t\t# check if inputted format is recognized\n\t\tif lower_format in self.formats:\n\t\t\treturn lower_format\n\t\t# check for aliases\n\t\telif lower_format in self.aliases:\n\t\t\treturn self.aliases[lower_format]\n\t\t# format is not recognized\n\t\telse:\n\t\t\traise FriendlyError(\n\t\t\t\tf\"'{format}' is not a recognized format.\", ctx.channel, ctx.author\n\t\t\t)", "def __format_input_translator(str_to_process):\n return re.sub(r'\\([^)]*\\)', '', str_to_process).replace(' ', '').split('/')", "def parseString(self, s):\n pass", "def preprocess(inpString):\n\n figure = re.compile(r'\\([^\\W\\d_]+ [0-9]+\\.[0-9]+\\)') # i.e (Tablo 3.2)\n out_string = figure.sub('', inpString)\n\n digit_dot = re.compile(r'([0-9]+)\\.([0-9]{3})') # i.e 19.000 --> 19000\n out_string = digit_dot.sub(r'\\1\\2', out_string)\n out_string = digit_dot.sub(r'\\1\\2', out_string)\n\n centigrade = re.compile(r'(°C)|(°c)|(0C)') # °C --> santigrat\n out_string = centigrade.sub(r' santigrat', out_string)\n\n out_string = re.sub(r'°', ' derece', out_string) # ° --> derece\n\n digit_space = re.compile(r'([0-9]+) ([0-9]+)') # 19 000 --> 19000\n out_string = digit_space.sub(r'\\1\\2', out_string)\n\n out_string = re.sub(r'â', 'a', out_string) # Elâzig --> Elazig\n\n spec_hyphen = re.compile(r'([A-Za-z])-([0-9]+)') # G-20 --> G20\n out_string = spec_hyphen.sub(r'\\1\\2', out_string)\n\n out_string = re.sub(r'-', ' ', out_string) # replace hyphen with space\n\n out_string = re.sub(r'%|‰', 'yüzde ', out_string) # % --> yuzde\n\n year = re.compile(\"([0-9]{4})(’|')([a-z]+)\") # 1815'te --> 1815 yilinda\n out_string = year.sub(r'\\1 yılında', out_string)\n\n out_string = re.sub(r' km2', ' kilometrekare', out_string) # converting km2, m, km\n out_string = re.sub(r' m ', ' metre ', out_string)\n out_string = re.sub(r' km ', ' kilometre ', out_string)\n\n out_string = re.sub(r\"(’|')([a-züşöıç]+)\", '', out_string) # turkiye'de --> turkiye\n\n out_string = re.sub(r'([0-9]+),([0-9]+)', r'\\1CBN\\2', out_string) # replacing comma between\n # digits with a placeholder\n\n puncs = string.punctuation + '”' + '“' + '’' + '‘'\n translator = str.maketrans('', '', puncs)\n out_string = out_string.translate(translator) # removing pucntuations\n\n out_string = re.sub(r'CBN', ',', out_string) # bringing back the comma between numbers\n # out_string= out_string.split(' ') #[s.split(' ') for s in out_string.split('#')] #splitting from end of sentences\n # end sentence into words\n\n return out_string", "def decode(self, s):", "def decode(self, s):", "def format_input(self, args):\n\n new_list = []\n if args[1].find('{') != -1:\n new_list = self.format_dicti(args)\n return new_list\n else:\n new_list = []\n new_list.append(args[0])\n new_str = args[1][ args[1].find('(') + 2 : args[1].find(',') - 1]\n new_str += args[1][ args[1].find(',') : args[1].find(')') - 0]\n new_list.append(\" 
\".join(new_str.split(\", \") ) )\n\n return \" \".join(i for i in new_list)", "def value_from_str(self, s):\n if is_quoted_str(s):\n return s[1:-1]\n return super().value_from_str(s)", "def value_from_str(self, s):\n if is_quoted_str(s):\n return s[1:-1]\n return super().value_from_str(s)", "async def parse(self, raw: str) -> dict:", "def convert(string, sanitize=False):\n return r.convert(string, (preprocess if sanitize else False))", "def _handle_string(\n *, artifacts: types.ColumnArtifacts\n) -> typing.Union[String, Binary, Date, DateTime]:\n if artifacts.open_api.format in {None, \"byte\", \"password\"}:\n if artifacts.open_api.max_length is None:\n return String\n return String(length=artifacts.open_api.max_length)\n if artifacts.open_api.format == \"binary\":\n if artifacts.open_api.max_length is None:\n return Binary\n return Binary(length=artifacts.open_api.max_length)\n if artifacts.open_api.format == \"date\":\n return Date\n if artifacts.open_api.format == \"date-time\":\n return DateTime\n raise exceptions.FeatureNotImplementedError(\n f\"{artifacts.open_api.format} format for string is not supported.\"\n )", "def get_params(raw):\n parts = raw.split(\" \", 1)\n return None if len(parts) == 1 else parts[1]", "def parse(self,value):\r\n\t\treturn unicode(value)", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def tidy_string(s: str\n ) -> str:\n s = s.encode('ascii', errors='ignore').decode(FORMAT)\n s = s.replace(\"\\r\", \"\").replace(\"\\t\", \"\").replace('\\n', '') \n return s", "def autoconvert(s):\n try:\n return eval(s)\n except:\n return s", "def preprocess_data(raw_text):\n try:\n raw_text = raw_text.replace(\"?\", \"Unknown\")\n raw_text = raw_text.replace(\" or \", \", \")\n raw_text = raw_text.replace(\" and \", \", \")\n raw_text = re.sub(\"\\[.*?\\]\", \"\", raw_text)\n raw_text = re.sub(\"\\(.*?\\)\", \"\", raw_text)\n return raw_text.strip().lower()\n except Exception as e:\n print(e.__str__())\n raise e", "def vsepr_parse_user_answer(user_input):\r\n return json.loads(user_input)", "def BlindConvertStringToProperValue(value: str, variables: dict, report, run_once=False, allowPrefixArithmetic=False) -> (object, FClass.FiMVariableTypes):\n if value == \"nothing\":\n return 0, FClass.FiMVariableTypes.NUMBER\n\n # Char\n if value.startswith(\"'\") and value.endswith(\"'\") and len(value[1:-1]) == 1:\n return value, FClass.FiMVariableTypes.CHAR\n #return value[1], FClass.FiMVariableTypes.CHAR\n # String\n if value.startswith('\"') and value.endswith('\"'):\n return value, FClass.FiMVariableTypes.STRING\n # Number\n try:\n if _isFloat(value):\n return float(value), FClass.FiMVariableTypes.NUMBER\n return int(value), FClass.FiMVariableTypes.NUMBER\n except:\n pass\n # Boolean\n try:\n return ConvertBooleanString( value ), FClass.FiMVariableTypes.BOOL\n except:\n pass\n # Variable Value\n if value in variables:\n #if variables[value]._isArray(True):\n # raise Exception(\"Cannot convert value from array\")\n return variables[value].getv(), variables[value].Type\n # Array length\n if value.startswith(\"length of \"):\n var_name = value[len(\"length of \"):]\n if not var_name in variables:\n raise Exception(f\"Cannot find variable {var_name}\")\n if not variables[var_name]._isArray():\n raise Exception(\"Cannot get length of non-array variable\")\n return len(variables[var_name].getsv()), FClass.FiMVariableTypes.NUMBER\n # Ascii number to character\n if value.startswith(\"raw char of \"):\n var_name = 
value[len(\"raw char of \"):]\n var_value, guessed_type = BlindConvertStringToProperValue(var_name, variables, report, True)\n if guessed_type != FClass.FiMVariableTypes.NUMBER:\n raise Exception(\"Cannot get ASCII value of a non-number value\")\n return chr(var_value), FClass.FiMVariableTypes.CHAR\n # Character to ascii number\n if value.startswith(\"raw num of \"):\n var_name = value[len(\"raw num of \"):]\n var_value, guessed_type = BlindConvertStringToProperValue(var_name, variables, report, True)\n if guessed_type != FClass.FiMVariableTypes.CHAR:\n raise Exception(\"Cannot get ASCII number of a non-char value\")\n if var_value.startswith(\"'\") and var_value.endswith(\"'\"):\n var_value = var_value[1]\n return ord(var_value), FClass.FiMVariableTypes.NUMBER\n # Number to string\n if value.startswith(\"string of \"):\n var_name = value[len(\"string of \"):]\n var_value, guessed_type = BlindConvertStringToProperValue(var_name, variables, report, True)\n if guessed_type != FClass.FiMVariableTypes.NUMBER:\n raise Exception(\"Cannot convert a non-number value to string\")\n return f'\"{var_value}\"', FClass.FiMVariableTypes.STRING\n # String/Bool/Char to number\n if value.startswith(\"number of \"):\n var_name = value[len(\"number of \"):]\n var_value, guessed_type = BlindConvertStringToProperValue(var_name, variables, report, True)\n if guessed_type == FClass.FiMVariableTypes.STRING or guessed_type == FClass.FiMVariableTypes.CHAR:\n var_value = var_value[1:-1]\n if _isFloat(var_value):\n return float(var_value), FClass.FiMVariableTypes.NUMBER \n return int(var_value), FClass.FiMVariableTypes.NUMBER\n elif guessed_type == FClass.FiMVariableTypes.BOOL:\n return 1 if var_value else 0, FClass.FiMVariableTypes.NUMBER\n else:\n raise Exception(\"Cannot convert non-string or non-bool value to number\")\n # Square root\n if value.startswith( FGlobal.Methods[\"Variable Square Root\"] ):\n sqrt_prefix = _findFirst(FGlobal.Methods[\"Variable Square Root\"], lambda x: value.startswith(x))\n var_name = value[len(sqrt_prefix):]\n var_value, guessed_type = BlindConvertStringToProperValue(var_name, variables, report, True)\n if guessed_type != FClass.FiMVariableTypes.NUMBER:\n raise Exception(\"Cannot square root a non-number value\")\n return math.sqrt(var_value), FClass.FiMVariableTypes.NUMBER\n # Paragraph return 1\n if value in report.Paragraphs:\n if report.Paragraphs[ value ].Return_Type is None:\n raise Exception(\"Paragraph doesn't return anything\")\n return report.RunParagraph(value).getv(), report.Paragraphs[ value ].Return_Type\n # Paragraph return 2\n if value.split(' using ')[0] in report.Paragraphs:\n paragraph_name = value.split(' using ')[0]\n if report.Paragraphs[ paragraph_name ].Return_Type is None:\n raise Exception(\"Paragraph doesn't return anything\")\n paragraph_param = ' using '.join( value.split(' using ')[1:] )\n params = list()\n for param_index, param in enumerate( paragraph_param.split(' and ') ):\n if param_index >= len(report.Paragraphs[ paragraph_name ].Parameters): break \n param_type,param_len = GrabVariableInitializerType(param)\n if param_type!=FClass.FiMVariableTypes.UNKNOWN:\n param = param[param_len:].strip()\n param_value, guessed_type = BlindConvertStringToProperValue(param, variables, report, run_once=True)\n if guessed_type != report.Paragraphs[ paragraph_name ].Parameters[ param_index ].Type:\n raise Exception(\"Invalid variable type\")\n params.append(param_value)\n return report.RunParagraph(paragraph_name, params).getv(), report.Paragraphs[ paragraph_name 
].Return_Type\n # Variable array value 1 and 2\n if (re.sub(r\" (\\d+)$\", \"\", value) in variables) or (\" of \" in value):\n var_name, var_index = GetVariableArrayDetails(value, variables, report)\n if var_name is None and var_index is None:\n raise Exception(\"Invalid variable array initializer\")\n return variables[var_name].getav(var_index), FClass.FiMVariableTypes.GetArrayType( variables[var_name].Type )\n # Arithmetic\n arith_check, arith_value = FCalc.FiMArithmetic.IsArithmetic( value )\n if arith_check:\n if (allowPrefixArithmetic) or (not allowPrefixArithmetic and not arith_value.is_prefix):\n arith = FCalc.FiMArithmetic(value, arith_value)\n arith.GetValues(variables, report)\n return arith.Calculate(), FClass.FiMVariableTypes.NUMBER\n\n # Return\n if run_once:\n raise Exception(f\"Cannot blind convert value {value}\")\n else:\n return SanitizeString(value, variables, report), FClass.FiMVariableTypes.STRING", "def raw(string):\n string = string or \"\"\n return string.replace(\"{\", \"{{\").replace(\"|\", \"||\")", "def value_from_str(self, s):\n raise ValueError()", "def _process_value(self, value):\n if isinstance(value, str):\n try:\n value.decode('ascii')\n except UnicodeDecodeError:\n return unicode(''.join([unichr(ord(c)) for c in value]))\n return value", "def get_data_from_nonformat_text():\n pass", "def beautify(self, string):\n\n\t\tif not string:\n\t\t\treturn string\n\n\t\t# string may differ because of escaped characters\n\t\tstring, phrases = self.parse(string)\n\n\t\tif not phrases:\n\t\t\treturn string\n\n\t\tif not self.positional and not self.always:\n\t\t\traise errors.ArgumentError(\"Found phrases, but no styles \"\n\t\t\t\t\t\t\t\t\t \"were supplied!\")\n\n\t\treturn self.stringify(string, phrases)", "def _normalize(fmt):\n if re.search(r\"\\d\\s+\", fmt):\n raise StructError(\"bad char in struct format\")\n return fmt.replace(\" \", \"\")", "def haiku_string_parser():\n pass", "def parse(self, input):\n pass", "def format_value(text):\n return text.encode('utf8').replace('\\n', ' ').replace('\\r', ' ')", "def get_correct_data_types(user_input, answer_type, alpha_string):\n if answer_type == int:\n try:\n user_input = int(user_input)\n except ValueError:\n user_input = None\n ui.print_error_message(\"Wrong value provided.\\n\")\n\n elif answer_type == str:\n if alpha_string:\n user_input_copy = user_input.replace(' ', '')\n\n if not user_input_copy.isalpha():\n user_input = None\n ui.print_error_message('It not alpha string.')\n\n return user_input", "def normalizeRawFromHeader(value):\n return value.replace('\\n', '').replace('\\r', '').strip()", "def from_str(cls, string):", "def scanf(format, string):\n re_fmt = compile(format)\n result = re_fmt.scanf(string)\n _log.debug('%r <- %r = %r', format, string, result)\n return result", "def prepare_input(self, extracted_str):\n\n # Remove withspace\n if self.options['remove_whitespace']:\n optimized_str = re.sub(' +', '', extracted_str)\n else:\n optimized_str = extracted_str\n \n # Remove accents\n if self.options['remove_accents']:\n optimized_str = unidecode(optimized_str)\n\n # specific replace\n for replace in self.options['replace']:\n assert len(replace) == 2, 'A replace should be a list of 2 items'\n optimized_str = optimized_str.replace(replace[0], replace[1])\n\n return optimized_str", "def format_input(user_input):\n values = [int(i) for i in user_input.strip().split(\" \")]\n start = Coord(values[0], values[1])\n end = Coord(values[2], values[3])\n return Zone(start, end)", "def 
test_prepare_value_string(self):\n field = FractionField()\n result = field.prepare_value(\"1/4\")\n self.assertEqual(\"1/4\", result)\n\n result = field.prepare_value(\"1 1/4\")\n self.assertEqual(\"1 1/4\", result)", "def normalize_string(string):\n return string.replace(\"\\xa0\", \" \")\\\n .replace(\"\\\\\", \"\")\\\n .replace(\"-LRB-\", \"(\")\\\n .replace(\"-RRB-\", \")\")\\\n .replace(\"-LCB-\", \"{\")\\\n .replace(\"-RCB-\", \"}\")\\\n .replace(\"-LSB-\", \"[\")\\\n .replace(\"-RSB-\", \"]\")", "def format_input(input):\n return f\"{input}> \"", "def _massage_raw_pg_output_vals(self):\n pass", "def __parse__(self, filter):\n \n if filter == 'zipcode':\n # Return 5 digit zip or, if applicable, Concatenate 5 digit and \n # 4 digit zipcode\n if self.data['Mailing Zip 4']:\n return \"%s-%s\" %(str(self.data['Mailing Zip Code'])[:-2],\n str(self.data['Mailing Zip 4'])[:-2]\n )\n else:\n return str(self.data['Mailing Zip Code'])[:-2]\n elif filter == 'employee_count':\n # Convert employee count string to digit\n pattern = '.+to\\s([0-9]+)'\n try:\n return re.findall(\n pattern, self.data['Location Employee Size Range'])[0]\n except IndexError:\n pass\n elif filter == 'phone':\n # Regex phone number digits and concatenate\n number = ''.join(re.findall('[0-9]+', \n self.data['Phone Number Combined']))\n return number if len(number) == 10 else 0", "def format(self, data):", "def _parse_input(self):\n #temperature\n regex = re.compile(\"TEMP=(\\d+\\.\\d*|\\d+)\")\n r = regex.search(self.file_dic['input'])\n if r:\n self.temperature = r.groups()[0]\n else:\n self.temperature = 298.15\n #theory\n regex = re.compile('(\\$contrl.+\\$end|\\$basis.+ \\$end)')\n temp_theory = regex.findall(self.file_dic['input'])\n contrl = temp_theory[0][:-4][7:].strip()\n basis = temp_theory[1][:-4][6:].strip()\n self.theory = contrl + ' ' + basis", "def parseInput(self, inputString):\n stringArray = self.inputChecker(inputString)\n if stringArray == \"Error\":\n return stringArray\n # Correctly assigns negative numbers\n stringArray = self.negativeCheck(stringArray)\n # Checks whether any of the numbers have been written in scientific notation\n stringArray = self.scientificNotationCheck(stringArray)\n# print(stringArray)\n return stringArray", "def format_data(self, data):", "def simple_format_string(self, value, force_raw=False):\n\n m = re.search(r'((\\n)|\\r|\\n|\\t|\\\\)', value)\n newlines = False\n raw = ''\n if m:\n if m.group(2):\n newlines = True\n raw = 'r'\n if force_raw and not raw:\n raw = 'r'\n\n single = self.needs_escape(value, '\\'')\n double = self.needs_escape(value, '\"')\n tsingle = self.needs_escape(value, '\\'', quote_count=3)\n tdouble = self.needs_escape(value, '\"', quote_count=3)\n if not double and not newlines:\n string = '%s\"%s\"' % (raw, value)\n elif not single and not newlines:\n string = '%s\\'%s\\'' % (raw, value)\n elif not tdouble:\n string = '%s\"\"\"%s\"\"\"' % (raw, value)\n elif not tsingle:\n string = '%s\\'\\'\\'%s\\'\\'\\'' % (raw, value)\n elif not newlines:\n string = '%s\"%s\"' % (raw, self.fix_escape(value, '\"'))\n else:\n string = '%s\"\"\"%s\"\"\"' % (raw, self.fix_escape(value, '\"', quote_count=3))\n return string", "def preprocess_raw(self):\n pass", "def __str__(self):\n\n\t\tif self.rawValue == None: return str()\n\n\t\tx = self.rawValue\n\n\t\tif not x.isdigit() or len(x) != 44 or len(set(x)) == 1:\n\t\t\treturn self.rawValue\n\n\t\treturn '{} {} {} {} {} {} {} {} {} {} {}'.format(x[:4], x[4:8], x[8:12], x[12:16], x[16:20], x[20:24], x[24:28], x[28:32], 
x[32:36], x[36:40], x[40:44])", "def ConvertUnknownStringToProperValue(value: str, vtype: FClass.FiMVariableTypes):\n if value == \"nothing\":\n no_value = {\n FClass.FiMVariableTypes.BOOL: False,\n FClass.FiMVariableTypes.NUMBER: 0,\n FClass.FiMVariableTypes.STRING: \"\\\"\\\"\",\n FClass.FiMVariableTypes.CHAR: \"'?'\"\n }\n if vtype in no_value: return no_value[ vtype ]\n \n if vtype == FClass.FiMVariableTypes.CHAR:\n if not value.startswith(\"'\") and not value.endswith(\"'\") and len(value[1:-1]) != 1:\n raise Exception(f\"Invalid char {value}\")\n return value\n if vtype == FClass.FiMVariableTypes.STRING:\n if not value.startswith('\"') and not value.endswith('\"'):\n raise Exception(f\"Invalid string {value}\")\n return value\n if vtype == FClass.FiMVariableTypes.NUMBER:\n try:\n return float(value)\n except:\n raise Exception(f\"Invalid number {value}\")\n if vtype == FClass.FiMVariableTypes.BOOL:\n try:\n return ConvertBooleanString( value )\n except:\n raise Exception(f\"Invalid boolean {value}\")", "def process(line, form):\n return unicodedata.normalize(form, line)", "def string_from_interwebs(input_value):\n \n return escape(unquote(input_value))", "def _convert_metastr(metastr, dtype=None):\n if dtype is None:\n dtype = str\n\n if issubclass(dtype, tuple):\n return tuple(float(f.strip()) for f in metastr.split(\",\"))\n else:\n return dtype(metastr)", "def test_format_phone_raw(self):\n number = '8095551234'\n self.assertEqual(format_phone(number), '(809) 555-1234')", "def _decode_string(box_string):\r\n if box_string == \"no_box\":\r\n return np.zeros((0,4))\r\n else:\r\n try:\r\n boxes = np.array([np.array([int(eval(i)) for i in box.split(\" \")])\r\n for box in box_string.split(\";\")])\r\n return boxes\r\n except:\r\n print(box_string)\r\n print(\"Submission is not well formatted. 
empty boxes will be returned\")\r\n return np.zeros((0,4))", "def reformat(ctx):\n pass", "def parse_input(userstring):\n xsplit = userstring.split()\n stringtovalues = [float(x) for x in xsplit]\n\n return stringtovalues", "def form(s):\r\n \r\n # removes leading and trailing apostrophe's from string\r\n s = s.strip(\"'\")\r\n \r\n # converts HTML hex back to characters\r\n s = s.replace(\"&#39;\", \"'\")\r\n s = s.replace(\"&#8217;\", \"’\")\r\n s = s.replace(\"&#8216;\", '\"')\r\n s = s.replace(\"&#8221;\", \"'\")\r\n s = s.replace(\"&#8220;\", \"'\")\r\n \r\n # success\r\n return s", "def decode_string(self, value):\r\n return value", "def preparse(self, raw):\n return raw", "def normalize(self, text: str) -> str:", "def _convert(string, type, message):\n try:\n return type(string)\n except ValueError as e:\n print(e)\n raise CharmmPSFError('Could not convert %s' % message)", "def normalize_string(string):\n return string.replace(u\"\\xa0\", \" \")\\\n .replace(\"\\\\\", \"\")\\\n .replace(\"-LRB-\", \"(\")\\\n .replace(\"-RRB-\", \")\")\\\n .replace(\"-LCB-\", \"{\")\\\n .replace(\"-RCB-\", \"}\")\\\n .replace(\"-LSB-\", \"[\")\\\n .replace(\"-RSB-\", \"]\")", "def parse_input(input_data):\n cleaned_input = input_data.replace('@ ', '').replace(':', '')\n split_data = cleaned_input.split(' ')\n xy = split_data[1].split(',')\n wh = split_data[2].split('x')\n return FabricRectangle(\n claim_id=split_data[0],\n x=int(xy[0]),\n y=int(xy[1]),\n width=int(wh[0]),\n height=int(wh[1])\n )", "def string_to_param(self,string):\n\n if (string.startswith(\"log_\")): return math.log10(self.string_to_param(string[4:]))\n if (string.startswith(\"ln_\")): return math.log(self.string_to_param(string[3:]))\n if (string.startswith(\"exp_\")): return math.exp(self.string_to_param(string[4:]))\n if (string == \"Mass\"): return self.glb[imass]/constants.solar_mass\n if (string == \"Radius\"): return self.glb[iradius]/constants.solar_radius\n if (string == \"Luminosity\"): return self.glb[iluminosity]/constants.solar_luminosity\n if (string == \"Z\"): return self.glb[iz0]\n if (string == \"Y\"): return 1.0-self.glb[iz0]-self.glb[ix0]\n if (string == \"X\"): return self.glb[ix0]\n if (string == \"Ys\"): return 1.0-self.glb[user_params_index[\"Zs\"]]-self.glb[user_params_index[\"Xs\"]]\n if (string == \"zsx_s\"): return self.zsx_s\n if (string == \"zsx_0\"): return self.zsx_0\n if (string == \"Fe_H\"): return self.FeH\n if (string == \"M_H\"): return self.MH\n if (string == \"Age\"): return self.glb[iage]\n if (string == \"Teff\"): return self.glb[itemperature]\n if (string == \"Dnu\"): return self.find_large_separation()*self.glb[ifreq_ref]\n if (string == \"numax\"): return self.numax\n if (string == \"Rho\"): return 3.0*self.glb[imass]/(4.0*math.pi*self.glb[iradius]**3)\n if (string == \"g\"): return constants.G*self.glb[imass]/self.glb[iradius]**2\n if (string == \"beta_Sonoi2015\"): return self.beta_Sonoi2015\n if (string == \"b_Kjeldsen2008\"): return self.b_Kjeldsen2008\n\n try:\n return self.glb[user_params_index[string]]\n except KeyError:\n sys.exit(\"ERROR: unrecognised model quantity: \"+string)", "def normalize_format(fmt):\n # Remove shape '()' at the forefront which is equivalent to an scalar\n if fmt[:2] == '()':\n fmt = fmt[2:]\n # Accept 'S' as a synonym of 'a'\n if fmt.find('S') >= 0:\n fmt = fmt.replace('S', 'a')\n return fmt", "def get_formatted_string(self, input_string):\n if isinstance(input_string, str):\n try:\n return self.get_processed_string(input_string)\n except KeyError as 
err:\n # Wrapping the KeyError into a less cryptic error for end-user\n # friendliness\n missing_key = err.args[0]\n raise KeyNotInContextError(\n f'Unable to format \\'{input_string}\\' with '\n f'{{{missing_key}}}, because '\n f'context[\\'{missing_key}\\'] doesn\\'t exist') from err\n else:\n raise TypeError(f\"can only format on strings. {input_string} is a \"\n f\"{type(input_string)} instead.\")", "def native_string(input_var):\n if isinstance(input_var, str):\n return input_var\n\n return input_var.decode('utf-8', 'replace')", "def handle_input(data: dict):", "def preprocess(string):\r\n # string = [strQ2B(ch) for ch in string.strip()]\r\n # return ''.join(string)\r\n return string", "def cleanData(rawData):\n\trawData = re.sub(r'R-LRB- \\(', r'R-LRB- -LRB-', rawData)\n\trawData = re.sub(r'R-RRB- \\)', r'R-RRB- -RRB-', rawData)\n\trawData = re.sub(r'R-RRB- \\(', r'R-RRB- -LRB-', rawData)\n\trawData = re.sub(r'-LRB- \\(', r'-LRB- -LRB-', rawData)\n\trawData = re.sub(r'-RRB- \\)', r'-RRB- -RRB-', rawData)\n\trawData = re.sub(r'PU \\(', r'PU -LRB-', rawData)\n\trawData = re.sub(r'PU \\)', r'PU -RRB-', rawData)\n\trawData = re.sub(r':-\\)', r'smileyface', rawData)\n\n\treturn rawData", "def convert_str(input_obj):\n if isinstance(input_obj, str):\n return(input_obj)\n elif input_obj is True:\n return('1')\n elif (input_obj is None):\n return('none')\n elif input_obj is False:\n return('0')\n else:\n return(str(input_obj))", "def getString(self):\n print \"Enter String value:\",\n self.string = raw_input()", "def _parseSingle(string):\n string = string.strip()\n \n if len(string) == 0:\n return ''\n \n pattern = re.compile(r'[^0-9]')\n if not pattern.search(string):\n return int(string)\n pattern = re.compile(r'[^0-9\\.eE]')\n if not pattern.search(string):\n if (string.count('.') <= 1 and \n (string.count('e') + string.count('E') <= 1)):\n return float(string)\n \n boolValue = _bool(string)\n if boolValue is not None:\n return boolValue\n \n if string[0] == string[-1]:\n if string[0] == '\"' or string[0] == \"'\":\n return string[1:-1]\n elif string[1] == string[-1]:\n if ((string[0] == 'u' or string[0] == 'r') and \n (string[1] == '\"' or string[1] == \"'\")):\n return string[2:-1]\n \n if string == 'None':\n return None\n \n return string", "def _sanitize_string_for_python(self, s):\n s = repr(s)\n\n if s.startswith('u'):\n s = s[1:]\n\n return s", "def main():\n haiku_string = \"clouds ,mur,mur ,dark,ly/it ,is ,a ,blin,ding ,ha,bit/ga,zing ,at ,the ,moon\"\n formatted_haiku = haiku_string_parser(haiku_string)\n print(formatted_haiku)", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def _parameterize_string(raw):\n\n parts = []\n s_index = 0\n\n for match in _PARAMETER_PATTERN.finditer(raw):\n parts.append(raw[s_index:match.start()])\n parts.append({u\"Ref\": match.group(1)})\n s_index = match.end()\n\n if not parts:\n return GenericHelperFn(raw)\n\n parts.append(raw[s_index:])\n return GenericHelperFn({u\"Fn::Join\": [u\"\", parts]})", "def parse_value(string: str) -> Union[str, dict, bool, int, float]:\n unesc_str = unescape(string)\n stripped = string.strip()\n if REGEX_RE.match(stripped):\n return {\"regex\": unesc_str.strip()[7:-2]}\n elif BOOL_RE.match(stripped):\n return stripped.lower() == \"true\"\n elif INT_RE.match(stripped):\n return 
int(stripped)\n elif FLOAT_RE.match(stripped):\n return float(stripped)\n else:\n return unesc_str[1:-1]", "def process_input(self, message: str) -> str:\n processed_input = message\n # Convert user ids to just nick names\n processed_input.replace(\"@\"+self.user.name+\"#\"+self.user.discriminator, \"\")\n processed_input.replace(\"@\"+self.user.name, \"\")\n return processed_input", "def parse_input_string(self, string_name):\n list_of_parts = string_name.split(\".\")\n if list_of_parts[0] == \"inputs\":\n return string_name\n else:\n # return only the integer part\n return int(list_of_parts[1])", "def psych(output_str, input_str_1, input_1, input_str_2, input_2, pressure=29.92):\n\n dry_bulb = 0\n wet_bulb = 0\n dewpoint = 0\n rel_hum = 0\n hum_rat = 0\n spec_vol = 0\n enthalpy = 0\n pressure *= 0.491154\n\n if input_str_1 in ('db', 'DB', 'wb', 'WB', 'dp', 'DP', 'rh', 'RH', 'hr', 'HR', 'sv', 'SV', 'en', 'EN') and \\\n input_str_2 in ('db', 'DB', 'wb', 'WB', 'dp', 'DP', 'rh', 'RH', 'hr', 'HR', 'sv', 'SV', 'en', 'EN'):\n\n if input_str_1 in ('db', 'DB'):\n dry_bulb = input_1\n elif input_str_2 in ('db', 'DB'):\n dry_bulb = input_2\n\n if input_str_1 in ('wb', 'WB'):\n wet_bulb = input_1\n elif input_str_2 in ('wb', 'WB'):\n wet_bulb = input_2\n\n if input_str_1 in ('dp', 'DP'):\n dewpoint = input_1\n elif input_str_2 in ('dp', 'DP'):\n dewpoint = input_2\n\n if input_str_1 in ('rh', 'RH'):\n rel_hum = input_1 / 100\n elif input_str_2 in ('rh', 'RH'):\n rel_hum = input_2 / 100\n\n if input_str_1 in ('hr', 'HR'):\n hum_rat = input_1\n elif input_str_2 in ('hr', 'HR'):\n hum_rat = input_2\n\n if input_str_1 in ('sv', 'SV'):\n spec_vol = input_1\n elif input_str_2 in ('sv', 'SV'):\n spec_vol = input_2\n\n if input_str_1 in ('en', 'EN'):\n enthalpy = input_1\n elif input_str_2 in ('en', 'EN'):\n enthalpy = input_2\n else:\n return ValueError('Invalid input types')\n\n if hum_rat < 0:\n return ValueError('Humidity ratio less than 0')\n if rel_hum < 0 or rel_hum > 1:\n return ValueError('Relative humidity less than 0 or greater than 100')\n\n ############################################################################################\n\n if input_str_1 in ('db', 'DB') or input_str_2 in ('db', 'DB'):\n if output_str in ('db', 'DB'):\n return dry_bulb\n\n db_r = dry_bulb + 459.67\n\n if input_str_1 in ('wb', 'WB') or input_str_2 in ('wb', 'WB'):\n if output_str in ('wb', 'WB'):\n return wet_bulb\n\n db_r = dry_bulb + 459.67\n wb_r = wet_bulb + 459.67\n\n pres_wb_sat = sat_pres(wb_r)\n\n hr_wb_sat = 0.62198 * pres_wb_sat / (pressure - pres_wb_sat)\n\n hum_rat = (hr_wb_sat * (1093 - .556 * wet_bulb) - 0.24 * (dry_bulb - wet_bulb)) / \\\n (1093 + .444 * dry_bulb - wet_bulb)\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n pres_db_sat = sat_pres(db_r)\n\n hr_db_sat = 0.62198 * pres_db_sat / (pressure - pres_db_sat)\n\n mu = hum_rat / hr_db_sat\n\n rel_hum = mu / (1 - (1 - mu) * (pres_db_sat / pressure))\n if rel_hum < 0 or rel_hum > 1:\n return -1 # ValueError('Calculated relative humidity less than 0')\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n if output_str in ('sv', 'SV'):\n spec_vol = 1545.32 * db_r * (1 + 1.6078 * hum_rat) / (28.9645 * pressure * 144)\n return spec_vol\n\n if output_str in ('en', 'EN'):\n enthalpy = 0.24 * dry_bulb + hum_rat * (1061 + 0.444 * dry_bulb)\n return enthalpy\n\n if output_str in ('dp', 'DP'):\n pres_vapor = (pressure * hum_rat) / (0.62198 + hum_rat)\n\n dewpoint = calc_dewpoint(pres_vapor)\n return dewpoint\n else:\n return ValueError('Unknown 
output request')\n elif input_str_1 in ('dp', 'DP') or input_str_2 in ('dp', 'DP'):\n if output_str in ('dp', 'DP'):\n return dewpoint\n\n dp_r = dewpoint + 459.67\n\n pres_vapor = sat_pres(dp_r)\n\n hum_rat = 0.62198 * pres_vapor / (pressure - pres_vapor)\n if hum_rat < 0:\n return -1 # ValueError('Calculated humidity ratio below 0')\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n pres_db_sat = sat_pres(db_r)\n\n hr_db_sat = 0.62198 * pres_db_sat / (pressure - pres_db_sat)\n\n mu = hum_rat / hr_db_sat\n\n rel_hum = mu / (1 - (1 - mu) * (pres_db_sat / pressure))\n if rel_hum < 0 or rel_hum > 1:\n return -1\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n if output_str in ('sv', 'SV'):\n spec_vol = 1545.32 * db_r * (1 + 1.6078 * hum_rat) / (28.9645 * pressure * 144)\n return spec_vol\n\n if output_str in ('en', 'EN'):\n enthalpy = 0.24 * dry_bulb + hum_rat * (1061 + 0.444 * dry_bulb)\n return enthalpy\n\n if output_str in ('wb', 'WB'):\n wet_bulb = calc_wetbulb(dry_bulb, hum_rat, pressure)\n return wet_bulb\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('rh', 'RH') or input_str_2 in ('rh', 'RH'):\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n pres_db_sat = sat_pres(db_r)\n\n pres_vapor = pres_db_sat * rel_hum\n\n hum_rat = 0.62198 * pres_vapor / (pressure - pres_vapor)\n if hum_rat < 0:\n return -1\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n # hr_db_sat = 0.62198 * pres_db_sat / (pressure - pres_db_sat)\n\n # mu = hum_rat / hr_db_sat\n\n if output_str in ('sv', 'SV'):\n spec_vol = 1545.32 * db_r * (1 + 1.6078 * hum_rat) / (28.9645 * pressure * 144)\n return spec_vol\n\n if output_str in ('en', 'EN'):\n enthalpy = 0.24 * dry_bulb + hum_rat * (1061 + 0.444 * dry_bulb)\n return enthalpy\n\n if output_str in ('dp', 'DP'):\n dewpoint = calc_dewpoint(pres_vapor)\n return dewpoint\n\n if output_str in ('wb', 'WB'):\n wet_bulb = calc_wetbulb(dry_bulb, hum_rat, pressure)\n return wet_bulb\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('hr', 'HR') or input_str_2 in ('hr', 'HR'):\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n pres_db_sat = sat_pres(db_r)\n\n pres_vapor = hum_rat * pressure / (hum_rat + 0.62198)\n\n rel_hum = pres_vapor / pres_db_sat\n if rel_hum < 0 or rel_hum > 1:\n return -1\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n # hr_db_sat = 0.62198 * pres_db_sat / (pressure - pres_db_sat)\n\n # mu = hum_rat / hr_db_sat\n\n if output_str in ('sv', 'SV'):\n spec_vol = 1545.32 * db_r * (1 + 1.6078 * hum_rat) / (28.9645 * pressure * 144)\n return spec_vol\n\n if output_str in ('en', 'EN'):\n enthalpy = 0.24 * dry_bulb + hum_rat * (1061 + 0.444 * dry_bulb)\n return enthalpy\n\n if output_str in ('dp', 'DP'):\n dewpoint = calc_dewpoint(pres_vapor)\n return dewpoint\n\n if output_str in ('wb', 'WB'):\n wet_bulb = calc_wetbulb(dry_bulb, hum_rat, pressure)\n return wet_bulb\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('sv', 'SV') or input_str_2 in ('sv', 'SV'):\n if output_str in ('sv', 'SV'):\n return spec_vol\n\n # pres_db_sat = sat_pres(db_r)\n\n hum_rat = (spec_vol * 28.9645 * (pressure * 144) / (1545.32 * db_r) - 1) / 1.6078\n if hum_rat < 0:\n return -1\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n pres_vapor = hum_rat * pressure / (hum_rat + 0.62198)\n\n rel_hum = pres_vapor / pressure\n if rel_hum < 0 or rel_hum > 1:\n return -1\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n if output_str in ('en', 'EN'):\n enthalpy = 
0.24 * dry_bulb + hum_rat * (1061 + 0.444 * dry_bulb)\n return enthalpy\n\n if output_str in ('dp', 'DP'):\n dewpoint = calc_dewpoint(pres_vapor)\n return dewpoint\n\n if output_str in ('wb', 'WB'):\n wet_bulb = calc_wetbulb(dry_bulb, hum_rat, pressure)\n return wet_bulb\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('en', 'EN') or input_str_2 in ('en', 'EN'):\n if output_str in ('en', 'EN'):\n return enthalpy\n\n pres_db_sat = sat_pres(dry_bulb)\n\n hum_rat = (enthalpy - 0.24 * dry_bulb) / (1061 + 0.444 * dry_bulb)\n if hum_rat < 0:\n return -1\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n pres_vapor = hum_rat * pressure / (hum_rat + 0.62198)\n\n rel_hum = pres_vapor / pres_db_sat\n if rel_hum < 0 or rel_hum > 1:\n return -1\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n # hr_db_sat = 0.62198 * pres_db_sat / (pressure - pres_db_sat)\n\n # mu = hum_rat / hr_db_sat\n\n if output_str in ('sv', 'SV'):\n spec_vol = 1545.32 * db_r * (1 + 1.6078 * hum_rat) / (28.9645 * pressure * 144)\n return spec_vol\n\n if output_str in ('dp', 'DP'):\n dewpoint = calc_dewpoint(pres_vapor)\n return dewpoint\n\n if output_str in ('wb', 'WB'):\n wet_bulb = calc_wetbulb(dry_bulb, hum_rat, pressure)\n return wet_bulb\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('wb', 'WB') or input_str_2 in ('wb', 'WB'):\n if output_str in ('wb', 'WB'):\n return wet_bulb\n\n wb_r = wet_bulb + 459.67\n\n if input_str_1 in ('dp', 'DP') or input_str_2 in ('dp', 'DP'):\n if output_str in ('dp', 'DP'):\n return dewpoint\n\n dp_r = dewpoint + 459.67\n\n pres_vapor = sat_pres(dp_r)\n\n hum_rat = 0.62198 * pres_vapor / (pressure - pres_vapor)\n if hum_rat < 0:\n return -1\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n pres_wb_sat = sat_pres(wb_r)\n\n hr_wb_sat = 0.62198 * pres_wb_sat / (pressure - pres_wb_sat)\n\n dry_bulb = ((1093 - 0.556 * wet_bulb) * hr_wb_sat + 0.24 * wet_bulb - (1093 - wet_bulb) * hum_rat) / \\\n (0.444 * hum_rat + 0.24)\n if output_str in ('db', 'DB'):\n return dry_bulb\n\n db_r = dry_bulb + 459.67\n\n pres_db_sat = sat_pres(dry_bulb)\n\n hr_db_sat = 0.62198 * pres_db_sat / (pressure - pres_db_sat)\n mu = hum_rat / hr_db_sat\n\n rel_hum = mu / (1 - (1 - mu) * (pres_db_sat / pressure))\n if rel_hum < 0 or rel_hum > 1:\n return -1\n if output_str in ('rh', 'RH'):\n return rel_hum\n\n if output_str in ('sv', 'SV'):\n spec_vol = 1545.32 * db_r * (1 + 1.6078 * hum_rat) / (28.9645 * pressure * 144)\n return spec_vol\n\n if output_str in ('en', 'EN'):\n enthalpy = 0.24 * dry_bulb + hum_rat * (1061 + 0.444 * dry_bulb)\n return enthalpy\n else:\n return ValueError('Unknown output request')\n elif input_str_1 in ('rh', 'RH') or input_str_2 in ('rh', 'RH'):\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n elif input_str_1 in ('hr', 'HR') or input_str_2 in ('hr', 'HR'):\n if output_str in ('hr', 'HR'):\n return hum_rat\n elif input_str_1 in ('sv', 'SV') or input_str_2 in ('sv', 'SV'):\n if output_str in ('sv', 'SV'):\n return spec_vol\n elif input_str_1 in ('en', 'EN') or input_str_2 in ('en', 'EN'):\n if output_str in ('en', 'EN'):\n return enthalpy\n return -1 # no enthalpy, wet bulb and enthalpy are too closely related to avoid problems\n elif input_str_1 in ('dp', 'DP') or input_str_2 in ('dp', 'DP'):\n if output_str in ('dp', 'DP'):\n return dewpoint\n\n dp_r = dewpoint + 459.67\n\n if input_str_1 in ('rh', 'RH') or input_str_2 in ('rh', 'RH'):\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n elif input_str_1 in 
('sv', 'SV') or input_str_2 in ('sv', 'SV'):\n if output_str in ('sv', 'SV'):\n return spec_vol\n elif input_str_1 in ('en', 'EN') or input_str_2 in ('en', 'EN'):\n if output_str in ('en', 'EN'):\n return enthalpy\n elif input_str_1 in ('hr', 'HR') or input_str_2 in ('hr', 'HR'):\n if output_str in ('hr', 'HR'):\n return hum_rat\n return -1 # no humidity ratio - it is the dew point more or less\n elif input_str_1 in ('hr', 'HR') or input_str_2 in ('hr', 'HR'):\n if output_str in ('hr', 'HR'):\n return hum_rat\n\n if input_str_1 in ('rh', 'RH') or input_str_2 in ('rh', 'RH'):\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n elif input_str_1 in ('sv', 'SV') or input_str_2 in ('sv', 'SV'):\n if output_str in ('sv', 'SV'):\n return spec_vol\n elif input_str_1 in ('en', 'EN') or input_str_2 in ('en', 'EN'):\n if output_str in ('en', 'EN'):\n return enthalpy\n elif input_str_1 in ('rh', 'RH') or input_str_2 in ('rh', 'RH'):\n if output_str in ('rh', 'RH'):\n return rel_hum * 100\n\n if input_str_1 in ('sv', 'SV') or input_str_2 in ('sv', 'SV'):\n if output_str in ('sv', 'SV'):\n return spec_vol\n elif input_str_1 in ('en', 'EN') or input_str_2 in ('en', 'EN'):\n if output_str in ('en', 'EN'):\n return enthalpy\n elif input_str_1 in ('sv', 'SV') or input_str_2 in ('sv', 'SV'):\n if output_str in ('sv', 'SV'):\n return spec_vol\n\n if input_str_1 in ('en', 'EN') or input_str_2 in ('en', 'EN'):\n if output_str in ('en', 'EN'):\n return enthalpy", "def preprocess_input(self, text):\n text = re.sub(r\"([^a-zA-Z0-9 -]+ +[^a-zA-Z0-9 -]*|[^a-zA-Z0-9 -]*\" +\n \" +[^a-zA-Z0-9 -]+)\", ' ', text, flags=re.UNICODE)\n text = re.sub(r\"([^a-zA-Z0-9 -]+$|^[^a-zA-Z0-9 -]+)\", '', text)\n text = re.sub(r\"([a-zA-Z0-9 -]+?)([^a-zA-Z0-9 -])([a-zA-Z0-9 -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE)\n text = re.sub(r\"([\\x00-\\x7F -]+?)([^a-zA-Z0-9 -]+)([\\x00-\\x7F -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE).encode(\"utf-8\")\n return re.sub(r\"([^a-zA-Z0-9 \\-\\'])\", '', text, flags=re.UNICODE)", "def prepare_value(self, value):\n if value is None:\n return value\n value = value.replace(\" \", \"\").replace(\".\", \"\")\n if value:\n return \"%s.%s.%s.%s\" % (value[0:3], value[3:7], value[7:11], value[11:])\n return value", "def format_value(self, val):\n\n if isinstance(val, (unicode, str)):\n val = self.standardise_quotes(val)\n \n try:\n val = unicode(val)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(val).encode('string_escape')\n val = unicode(ascii_text)\n\n return val", "def handle_value(value):\n\n if value[-1] == 'x':\n return float(value[0:-1])\n\n if value[-1] == '%':\n return float(value[0:-1])\n\n if value[0].isdigit():\n return bytify(value)\n\n raise ValueError", "def parse_string(self, in_str):\n match = MAIN_REGEX.search(in_str)\n if not match:\n err_str = \"Unable to parse string: %s\" % in_str\n raise ValueError(err_str)\n self.parse_completed(match.group(1))\n self.parse_priority(match.group(2))\n if match.group(3) and match.group(4):\n self.parse_completion_date(match.group(3))\n self.parse_creation_date(match.group(4))\n else:\n self.parse_creation_date(match.group(3))\n self.parse_description(match.group(5))", "def clean_string(raw_string):\n if raw_string == None:\n return\n\n clean_string = raw_string.strip('\\n')\n clean_string = ' '.join(clean_string.split())\n return clean_string", "def from_str (s):\n try: \n return from_csv(s)\n except Exception: \n pass\n \n try: \n return from_hex(s)\n except Exception: \n pass\n\n try:\n return 
from_name(s)\n except Exception: \n pass\n\n raise ColourFormatError(\"'%s' is not a recognized colour string\"%s)", "def str_to_rule(str_in):\r\n log.debug(\"str_to_rule: \"+str_in.strip())\r\n str_i = str_in.strip().split('#')[0].strip()\r\n if len(str_i)>0:\r\n dic_rule = dict(valid=False,type='para',errors=list(),key=\"\",val=\"\")\r\n if(str_i[0]==\"%\"): # % Parameter str_i\r\n lst_par = str_i[1:].split('=')\r\n lst_par = [par.strip() for par in lst_par]\r\n if lst_par[0] in lst_known_para:\r\n dic_rule['key']=lst_par[0].strip()\r\n dic_rule['val']=lst_par[1].strip()\r\n dic_rule['valid']=True\r\n log.info('Parameter recognised: '+str(dic_rule['key'])+' = '+str(dic_rule['val']))\r\n else:\r\n dic_rule['valid']=False\r\n dic_rule['errors'].append(\"Unrecognised parameter: \"+lst_par[0])\r\n log.warning('#205 > '+str(dic_rule['errors'])+' raw line: '+str_i)\r\n elif(str_i[0]==\":\"): # : Rule str_i\r\n dic_rule = dict(valid=False,type='rule',errors=list(),id=\"\",title=\"\",mode=\"\",data_table=\"\",condition=\"\",action=\"\",act_param=\"\",comment=\"\")\r\n lst_items = str_i[1:].split(\":\")\r\n lst_items = [itm.strip() for itm in lst_items]\r\n if len(lst_items)==8:\r\n dic_rule['id']=lst_items[0]\r\n dic_rule['title']=lst_items[1]\r\n dic_rule['mode']=lst_items[2].upper()\r\n dic_rule['layer']=lst_items[3]\r\n dic_rule['condition']=lst_items[4]\r\n dic_rule['action']=lst_items[5]\r\n dic_rule['act_param']=lst_items[6]\r\n dic_rule['comment']=lst_items[7]\r\n dic_rule = sanity_check(dic_rule)\r\n if not dic_rule['valid']:\r\n log.warning('#203 invalid rule > '+str(dic_rule['errors'])+' raw line: '+str_in)\r\n log.debug('parsed good rule: '+str(dic_rule))\r\n else:\r\n dic_rule['errors'].append(\"Rule string does not contain the correct number of elements - Check that you comment do not contain ':'. Ignoring this rule. 
\\n\\t\"+str_in.strip()+\"\\n\\t\"+str(len(lst_items))+'\\t'+str(lst_items))\r\n log.warning('#202 '+dic_rule['errors'])\r\n dic_rule['valid']=False\r\n else:\r\n dic_rule['errors'].append(\"Rule string must start with #, % or : But I found: \"+str_in[0]+\" in line (\"+str_in+\")\")\r\n log.warning('#201 '+str(dic_rule['errors'][-1:]))\r\n dic_rule['valid']=False\r\n else: # Empty (or only comments) str_i\r\n return {'type':'null', 'valid':True}\r\n return dic_rule", "def formatengsrt(input, output):\n \n p = ( (r\"-=.*?=-\\s+\", \"\", re.U), #类似 ==http://a.b.c/=- 删除\n (r\"<.*?>\", \"\", re.U), #类似 <...> 删除\n (r\"^[\\s\\d\\-:>,]*[\\r\\n]+\", r\"\", re.M|re.U), #'-'是特殊字符\n (r\"(\\S+)\\s+$\", r\"\\1\", re.M|re.U), #删除结尾的空余字符\n (r\"\\.{3}[\\r\\n]+([a-z])\", r\" \\1\", re.U), #结尾是...的,下一行开始是一个小写字母的。\n (r\"([^\\.?!])[\\r\\n]+\", r\"\\1 \", re.U), #结尾没有 .?!的,添加空格,去掉回车换行\n (r\"(\\w+)[,.?!](\\w)\", r\"\\1, \\2\", re.U), #有些单词后逗号后面没有空格,加上\n )\n\n d = chardet.detect(open(input, \"r\").read())\n print \"[%s] 自动检测为 %s\" %(input, d)\n\n with codecs.open(input, encoding=d['encoding'], mode='r') as fi:\n t = fi.read()\n \n for a, b, c in p:\n t = re.sub(a, b, t, 0, c)\n\n with codecs.open(output, encoding=d['encoding'], mode='w') as fo:\n fo.write(t)\n print \"[{}] compeleted.\".format(output)", "def street_parser(*street_data):\n\n # parsing tuples\n if len(street_data) == 2:\n if not isinstance(street_data[0], str) and not isinstance(street_data[1], str):\n raise WrongInput(\"Invalid format\")\n # street name as the tuple's first item\n strname, strnumber = street_data\n # street number as the tuple's first item\n if street_data[0][0] in digits:\n strname, strnumber = strnumber, strname\n\n # parsing strings\n else:\n if not isinstance(street_data[0], str):\n raise WrongInput(\"Invalid format\")\n if not street_data[0]:\n raise WrongInput(\"Input cannot be blank\")\n\n # string starting with street number\n if street_data[0][0] in digits:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\d+) # street number is any number of digits\n \\W+ # separator\n (\\w+\\W*\\w*\\W*) # street name is one or more words with optional separators\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n strnumber, strname = street_obj.groups()\n\n # string starting with street name\n else:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\w+\\W*\\w*\\s*) # street name is one or more words with optional separators\n \\W+ # separator\n (\\d+) # street number is any number of digits\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n (strname, strnumber) = street_obj.groups()\n\n # replace specific words in street name with their abbreviates\n strname = strname.lower()\n special = {r'\\baleje\\b': 'Al.', r'\\bavenue\\b': 'Av.', r'\\broad\\b': 'Rd.', r'\\bsquare\\b': 'Sq.',\n r'\\bstreet\\b': 'St.', r'\\bdrive\\b': 'Dr.'}\n for key in special:\n strname = re.sub(key, special[key], strname)\n return strname.title(), strnumber", "def makeinputstring(variabel):\r\n if type(variabel) == int:\r\n return str(variabel)\r\n elif type(variabel) == float:\r\n return str(int(float(variabel)))\r\n else:\r\n return str(variabel)", "def inputString(self):\n return self.__inputString" ]
[ "0.66427", "0.6007272", "0.5868643", "0.58346975", "0.57762647", "0.5764655", "0.57391506", "0.57352734", "0.5719354", "0.5683081", "0.5664899", "0.5664475", "0.5585627", "0.5576046", "0.5576046", "0.55689394", "0.5552692", "0.5552692", "0.5544937", "0.5495645", "0.54807526", "0.5418459", "0.53892833", "0.5384292", "0.5382638", "0.5378241", "0.5371518", "0.53586084", "0.5351287", "0.53503245", "0.53364813", "0.5335731", "0.530559", "0.5294708", "0.5285341", "0.5279502", "0.52728873", "0.5267332", "0.5266283", "0.52463883", "0.524558", "0.52444476", "0.5239453", "0.5236715", "0.5220451", "0.52135247", "0.5212925", "0.5206414", "0.5201156", "0.5199003", "0.51982063", "0.5190856", "0.51889104", "0.5188516", "0.51849896", "0.518319", "0.5181675", "0.5181284", "0.51721823", "0.5169754", "0.5168834", "0.51686615", "0.5158072", "0.51426816", "0.51420003", "0.51383287", "0.5128351", "0.51230925", "0.51162034", "0.5116013", "0.5115869", "0.5113908", "0.5112067", "0.5111067", "0.51051027", "0.5100286", "0.50986123", "0.50967115", "0.50910944", "0.5083668", "0.5077099", "0.5059676", "0.5059358", "0.50516516", "0.50502443", "0.5046505", "0.50324017", "0.5030788", "0.502869", "0.5026832", "0.50093925", "0.50085175", "0.50051737", "0.50009084", "0.49959996", "0.49952844", "0.49935725", "0.4990902", "0.49905077", "0.4986093", "0.49807656" ]
0.0
-1
View for rendering hours as json.
def json_hours(request):
    current_site = Site.find_for_request(request)
    if request.method == 'GET':
        if request.GET.get('fallback'):
            fallback = request.GET['fallback']
            return JsonResponse(
                {
                    'llid': get_default_unit().location.libcal_library_id,
                }
            )
        else:
            libcalid = request.GET['libcalid']
            all_building_hours = json.dumps(get_building_hours_and_lid(current_site))
            return JsonResponse(
                {
                    'all_building_hours': all_building_hours,
                    'current_hours': get_json_hours_by_id(int(libcalid), all_building_hours),
                    'llid': libcalid,
                    'llid_fallback': get_default_unit().location.libcal_library_id,
                }
            )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_hours(employee_id):\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n \n employee = Employee.query.get_or_404(employee_id)\n\n labels = json.dumps( [\"Completed\", \"Required\"])\n data = json.dumps([employee.completed, employee.required])\n \n return render_template(\"users/display_hours.html\", employee = employee, labels = labels, data = data)", "def _draw_hours(self):\n tmp_str_list = []\n for i in range(0, self._g_width, self._min_grid):\n if i % self._hour_grid == 0:\n tmp_str_list.append('<polyline class=\"FullHour\" points=\"%d,%d, %d,%d\" />' % (\n i + .5 + 20, 20, i + .5 + 20, self._g_height))\n tmp_str_list.append('<text class=\"Label\" x=\"%d\" y=\"%d\">%d</text>' % (\n i + 20, 20, (i / self._hour_grid + self._offset) % 24))\n else:\n tmp_str_list.append('<polyline class=\"SubHour\" points=\"%d,%d,%d,%d\" />' % (\n i + .5 + 20, 20, i + .5 + 20, self._g_height))\n return \"\".join(tmp_str_list)", "def get(self, request):\n\t\tworkingHours = GymModels.WorkingHours.objects.all()\n\t\tserializer = PeopleSerializer.WorkingHourSerializer(workingHours, many=True)\n\t\treturn Response(serializer.data)", "def hours(self, venue_id):\n response = self._request(V2_ENDPOINTS['HOURS'] + venue_id)\n return response", "def hours(input=None):\n return get(input).hours", "def hours(self):\n return self.config['hours']", "def json_frapp(request):\n from pv.settings import MEDIA_URL\n\n if request.GET.get('date') == None:\n start = datetime.combine(date.today(), time(0, 0))\n else:\n start = datetime.combine( datetime.strptime(request.GET.get('date'), '%Y-%m-%d').date(), time(0, 0))\n\n end = datetime.combine(start, time(23, 59))\n\n timeslots = TimeSlot.objects.filter(start__gte=start,start__lte=end).select_related('show').order_by('start')\n\n\n '''Generate categories object for output'''\n\n categories = Category.objects.all()\n categories_output = []\n\n for c in categories:\n c_entry = {\n 'id': c.id,\n 'color': c.color.replace('#', '').upper(),\n 'namedisplay': c.category,\n 'description': c.description\n }\n\n categories_output.append(c_entry)\n\n # Get all series for timeslots\n series = set()\n for ts in timeslots:\n series.add(ts.show)\n\n\n '''Generate series object for output'''\n\n series_output = []\n\n for s in series:\n metainfos = []\n metainfos.append({ 'key': 'ProduzentIn', 'value': ', '.join(ts.show.hosts.values_list('name', flat=True)) })\n metainfos.append({ 'key': 'E-Mail', 'value': ', '.join(ts.show.hosts.values_list('email', flat=True)) })\n\n image = '' if s.image.name == None or s.image.name == '' else str(get_current_site(request)) + MEDIA_URL + s.image.name\n url = '' if s.website == None or s.website == '' else s.website\n\n # Get active schedules for the given date\n # But include upcoming single timeslots (with rrule_id=1)\n schedules = Schedule.objects.filter( Q(show=s.id,is_repetition=False) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n schedules_repetition = Schedule.objects.filter( Q(show=s.id,is_repetition=True) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n broadcastinfos = ''\n\n if not schedules.exists():\n continue\n\n for schedule in schedules:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n if schedules_repetition.exists():\n broadcastinfos = broadcastinfos + 'Wiederholung jeweils:'\n for schedule in schedules_repetition:\n broadcastinfos = 
broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n s_entry = {\n 'id': s.id,\n 'categoryid': s.category.values_list('id', flat=True)[0],\n 'color': s.category.values_list('color', flat=True)[0].replace('#', '').upper(),\n 'namedisplay': s.name,\n 'description': s.description,\n 'url': url,\n 'image': image,\n 'broadcastinfos': broadcastinfos,\n 'metainfos': metainfos\n }\n\n series_output.append(s_entry)\n\n\n '''Generate shows object for output'''\n\n shows_output = []\n\n for ts in timeslots:\n\n is_repetition = ' ' + _('REP') if ts.schedule.is_repetition is 1 else ''\n namedisplay = ts.show.name + is_repetition\n description = ts.show.description\n url = str(get_current_site(request)) + '/shows/' + ts.show.slug\n urlmp3 = ''\n\n # If there's a note to the timeslot use its title, description and url\n try:\n note = Note.objects.get(timeslot=ts.id)\n namedisplay = note.title + is_repetition\n description = note.content\n url = str(get_current_site(request)) + '/notes/' + note.slug\n urlmp3 = note.audio_url\n except ObjectDoesNotExist:\n pass\n\n ts_entry = {\n 'id': ts.id,\n 'seriesid': ts.show.id,\n 'datetimestart': ts.start.strftime('%d.%m.%Y %H:%M:%S'),\n 'datetimeend': ts.end.strftime('%d.%m.%Y %H:%M:%S'),\n 'namedisplay': namedisplay,\n 'description': description,\n 'url': url,\n 'urlmp3': urlmp3,\n }\n\n shows_output.append(ts_entry)\n\n output = {}\n output['categories'] = categories_output\n output['series'] = series_output\n output['shows'] = shows_output\n\n return HttpResponse(json.dumps(output, ensure_ascii=False).encode('utf8'),\n content_type=\"application/json; charset=utf-8\")", "def format_hours(self, data):\n return unicode('%f' % data).rstrip('0').rstrip('.')", "def hourly(self):\n return c.Hourly(self)", "def output(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"type\": 2,\n \"enabled\": 1,\n \"days_of_week\": self.days,\n \"time\": self.time.output()\n }", "def open_hours_detail(self):\n return self._open_hours_detail", "def get_hourly_weather_details(self, hours: int = None):\n if hours is None:\n hours = 11\n forecast = super().get_weather_forecast(self.BASE_URL)\n headers = [\"date_time\",\n \"temp\",\n \"real_feel_temp\",\n \"wind_speed\",\n \"rain_probability\",\n \"cloud_cover\",\n ]\n for number in range(hours):\n data = []\n date_time = forecast[number]['DateTime']\n date_time = date_time[:16]\n date_time = date_time.replace('T', ' ')\n data.append(date_time)\n temp = round((int(\n forecast[number][\"Temperature\"][\"Value\"]) - 32) / 1.8)\n data.append(temp)\n real_feel_temp = round((int(\n forecast[number][\"RealFeelTemperature\"][\"Value\"]) - 32) / 1.8)\n data.append(real_feel_temp)\n wind_speed = forecast[number][\"Wind\"][\"Speed\"][\"Value\"]\n data.append(wind_speed)\n rain_probability = forecast[number][\"RainProbability\"]\n data.append(rain_probability)\n cloud_cover = forecast[number][\"CloudCover\"]\n data.append(cloud_cover)\n yield dict(zip(headers, data))", "def get_24h(self):\n records = self.level_model.get_for_period(1)\n self.set_attributes(records, '24 hours')", "def output(self):\n \n if self.days > 0:\n interval = \"%dd\" % self.days\n if self.hours > 0:\n interval = \"%dh\" % self.hours\n if self.minutes > 0:\n interval = \"%dm\" % self.minutes\n else:\n interval = \"%ds\" % self.seconds\n \n return {\n \"id\": self.id,\n \"name\": self.name,\n \"type\": 1,\n \"enabled\": 1,\n \"interval\": interval\n }", "def output(self):\n time = \"%04d-%02d-%02d %02d:%02d:%02d\" % (self.year, self.month, \\\n self.date, 
self.hours, self.minutes, self.seconds)\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"type\": 4,\n \"enabled\": 1,\n \"abstime\": time\n }", "def timesheet(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet.html'\r\n )", "def get_hourly(self):\n pass", "def output(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"type\": 3,\n \"enabled\": 1,\n \"days_of_month\": self.days,\n \"time\": self.time.output()\n }", "def hourly_data(self):\n return self._hourly_data", "def stats_format_hours(app_id, hours, hours_anon, hours_auth,\r\n max_hours, max_hours_anon, max_hours_auth):\r\n hourNewStats = dict(label=\"Anon + Auth\", disabled=\"True\", values=[], max=0)\r\n hourNewAnonStats = dict(label=\"Anonymous\", values=[], max=0)\r\n hourNewAuthStats = dict(label=\"Authenticated\", values=[], max=0)\r\n\r\n hourNewStats['max'] = max_hours\r\n hourNewAnonStats['max'] = max_hours_anon\r\n hourNewAuthStats['max'] = max_hours_auth\r\n\r\n for h in sorted(hours.keys()):\r\n # New answers per hour\r\n #hourNewStats['values'].append(dict(x=int(h), y=hours[h], size=hours[h]*10))\r\n if (hours[h] != 0):\r\n hourNewStats['values'].append([int(h), hours[h],\r\n (hours[h] * 5) / max_hours])\r\n else:\r\n hourNewStats['values'].append([int(h), hours[h], 0])\r\n\r\n # New Anonymous answers per hour\r\n if h in hours_anon.keys():\r\n #hourNewAnonStats['values'].append(dict(x=int(h), y=hours[h], size=hours_anon[h]*10))\r\n if (hours_anon[h] != 0):\r\n hourNewAnonStats['values'].append([int(h), hours_anon[h],\r\n (hours_anon[h] * 5) / max_hours])\r\n else:\r\n hourNewAnonStats['values'].append([int(h), hours_anon[h], 0])\r\n\r\n # New Authenticated answers per hour\r\n if h in hours_auth.keys():\r\n #hourNewAuthStats['values'].append(dict(x=int(h), y=hours[h], size=hours_auth[h]*10))\r\n if (hours_auth[h] != 0):\r\n hourNewAuthStats['values'].append([int(h), hours_auth[h],\r\n (hours_auth[h] * 5) / max_hours])\r\n else:\r\n hourNewAuthStats['values'].append([int(h), hours_auth[h], 0])\r\n return hourNewStats, hourNewAnonStats, hourNewAuthStats", "def json(self):\n return {\n 'uri': self.view_uri,\n 'created': time.strftime('%c', time.gmtime(self.created)),\n 'created_timestamp': self.created,\n 'exception_type': str(self.exc_type),\n 'exception': str(self.exc_value),\n }", "def to_json(self):\n new_dict = self.__dict__.copy()\n new_dict.update({'__class__': str(self.__class__.__name__)})\n for key, value in new_dict.items():\n if isinstance(value, datetime):\n new_dict[key] = value.strftime(self.timeformat)\n return new_dict", "def getJSON(self):\n text = super().getJSON() + f', \"exchange\": \"{self.__exchange}\"'\n text += f', \"market pair\": \"{self.__market_pairs}\"'\n text += f', \"interval\": \"{self.__interval}\"}}'\n return text", "def getHourColumn(self): \n return self.hourcol", "def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")", "def hour(self):\n return \"%s:00:00\" % self.timestamp[:13]", "def get_hours(self, date = \"\"):\n\n if date == \"\":\n DATE = datetime.today()\n else:\n 
year, month, day = date.split('-')\n DATE = datetime(int(year), int(month), int(day))\n\n s = requests.get(\"https://api.wdpro.disney.go.com/facility-service/schedules/{}?date={}-{}-{}\".format(self.__id, DATE.year, self.__formatDate(str(DATE.month)), self.__formatDate(str(DATE.day))), headers=getHeaders())\n data = json.loads(s.content)\n\n operating_hours_start = None\n operating_hours_end = None\n extra_hours_start = None\n extra_hours_end = None\n\n try:\n for i in range(len(data['schedules'])):\n if data['schedules'][i]['type'] == 'Operating':\n operating_hours_start = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['startTime'][0:2]), int(data['schedules'][i]['startTime'][3:5]))\n if int(data['schedules'][i]['endTime'][0:2]) >= 0 and int(data['schedules'][i]['endTime'][0:2]) <= 7:\n DATETEMP = DATE + timedelta(days=1)\n operating_hours_end = datetime(DATETEMP.year, DATETEMP.month, DATETEMP.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n else:\n operating_hours_end = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n\n if data['schedules'][i]['type'] == \"Special Ticketed Event\":\n extra_hours_start = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['startTime'][0:2]), int(data['schedules'][i]['startTime'][3:5]))\n if int(data['schedules'][i]['endTime'][0:2]) >= 0 and int(data['schedules'][i]['endTime'][0:2]) <= 7:\n DATETEMP = DATE + timedelta(days=1)\n extra_hours_end = datetime(DATETEMP.year, DATETEMP.month, DATETEMP.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n else:\n operating_hours_end = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n\n except KeyError:\n pass\n return operating_hours_start, operating_hours_end, extra_hours_start, extra_hours_end", "def active_hours(self):\n return self._active_hours", "def post(self, request):\n\t\tserializer = PeopleSerializer.WorkingHourSerializer(data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\treturn Response(serializer.data, status=200)\n\t\treturn Response(serializer.errors, status=422)", "def hour(self):\n return self._hour", "def heartbeat(request):\r\n output = {\r\n 'date': datetime.now(UTC).isoformat(),\r\n 'courses': [course.location.to_deprecated_string() for course in modulestore().get_courses()],\r\n }\r\n return HttpResponse(json.dumps(output, indent=4))", "def get_json(self):\n return {\n \"power\": self.get_power(), \n \"timestamp\": self.get_timestamp(), \n \"shortage\": self.get_shortage()\n }", "def hour(self) -> int:\n return pulumi.get(self, \"hour\")", "def json_events(request):\n if request.method == 'GET':\n ttrss_url = request.GET['feed']\n\n # need xml for this. 
\n university_url = 'http://events.uchicago.edu/widgets/rss.php?key=47866f880d62a4f4517a44381f4a990d&id=48'\n\n n = datetime.datetime.now()\n return JsonResponse(\n {\n 'events': flatten_events(get_events(university_url, ttrss_url, n, n + relativedelta(years=1), False))\n }\n )", "def getHour(self, parent):\r\n self.now = datetime.now()\r\n self.current_time = self.now.strftime(\"%H:%M:%S\")\r\n self.lineEditWidgets[\"HORA\"].setText(self.current_time)", "def get_hourly(station_id):\n hourdata = db.session.query(func.avg(DublinBike.available_bike)) \\\n .filter(DublinBike.number == station_id) \\\n .group_by(extract('hour', DublinBike.localtime)) \\\n .order_by(extract('hour', DublinBike.localtime)) \\\n .all()\n return jsonify([\n {'hour': i,\n 'available_bike': float(hourdata[i][0])\n } for i in range(24)\n ])", "def __hour(self):\n return _VirtualColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"hour\",\n operand1=self,\n operand2=None\n )", "def view_meetups():\n\n return make_response(jsonify({\n \"status\": 200,\n \"data\": meetups.view_meetups()\n })), 200", "def to_json(self):\n json = {\n \"url\": url_for(\n \"api.get_week\",\n year=self.year,\n week=self.week,\n ),\n \"year\": self.year,\n \"week\": self.week,\n \"text\": self.text or \"\",\n \"tags\": [tag.text for tag in self.tags],\n }\n return json", "def mean_time():\n return render_template('mean_time_weekday.html')", "def show_time(self):\n hour = str(datetime.datetime.now().strftime(\"%H\"))\n minute = str(datetime.datetime.now().strftime(\"%M\"))\n\n hour1 = int(hour[0])\n hour2 = int(hour[1])\n minute1 = int(minute[0])\n minute2 = int(minute[1])\n\n self.light_number(self.numbers[hour1], [0, 5])\n self.light_number(self.numbers[hour2], [0, 0])\n self.light_number(self.numbers[minute1], [5, 5])\n self.light_number(self.numbers[minute2], [5, 0])", "def to_representation(self, instance):\n rep = super(TicketSerializer, self).to_representation(instance)\n rep['showtime'] = f\"{instance.showtime.room.name} {instance.showtime.movie.title} \" \\\n f\"{instance.showtime.start_date}\"\n return rep", "def get_timetable():\n# NAME,DEGREE_CODE,SEM_CODE,EMPLOYEE,SUBJECT,TIMETABLE,EXAMDATE\n rows = db.engine.execute(f\"SELECT * FROM stutimetable WHERE ALTCODE = {g.user.ALTCODE}\")\n res = []\n for row in rows:\n res.append(dict(row))\n return jsonify(res)", "def h_24(self):\n return self._hour", "def get_events_for_next_hours(hours):\n\tresults = session.query(\"event_name\",\"date\",\"start_time\").\\\n\tfrom_statement(\"select event_name,date,start_time from event where timestampdiff(HOUR,now(),concat(date,' ',start_time))>0 and \\\n\t timestampdiff(HOUR,now(),concat(date,' ',start_time)) < :numhours\").\\\n\tparams(numhours = int(hours)).all()\n\t#return str(results)\n\tret_dict = {}\n\tevents = []\n\tfor event_tuple in results:\n\t\ttemp = {}\n\t\ttemp['event_name'] = event_tuple[0]\n\t\ttemp['start_date'] = str(event_tuple[1])\n\t\ttemp['start_time'] = str(event_tuple[2])\n\t\tevents.append(temp)\n\tret_dict['events'] = events\n\treturn jsonify(ret_dict)", "def __str__(self):\n return \"({0}:{1}:{2})\".format(self.hours, self.minutes, self.seconds)", "def __json__(self, request=None):\n # start = self.start.isoformat() if self.start else None\n # end = self.end.isoformat() if self.end else None\n return dict(\n timeref_type=\"daterange\",\n interval=self.interval,\n start=self.start.isoformat(),\n end=self.end.isoformat(),\n )", "def json_view(self, recursive=False):\n\n context = self.context.aq_inner\n data = 
self.export(context, recursive=recursive)\n pretty = json.dumps(data, sort_keys=True, indent=4)\n self.request.response.setHeader(\"Content-type\", \"application/json\")\n return pretty", "def schedule(request):\n return render(request, 'vaxcharts/schedule.html')", "def continent_json(request, continent_code):\n raise Http404(\"Not implemented\")", "def hour(self) -> int:\r\n return self._hour", "def hour(self) -> int:\r\n return self._hour", "def getHour(self):\n return _libsbml.Date_getHour(self)", "def _calc_times():\n app.logger.debug(\"Got a JSON request\")\n km = request.args.get('km', 999, type=float)\n distance = request.args.get('distance', type = int)\n begin_date = request.args.get('begin_date', type = str)\n begin_time = request.args.get('begin_time', type = str)\n dateAndTime = begin_date + \" \" + begin_time\n time = arrow.get(dateAndTime, 'YYYY-MM-DD HH:mm') \n \n open_time = acp_times.open_time(km, distance, time.isoformat())\n close_time = acp_times.close_time(km, distance, time.isoformat())\n result = {\"open\": open_time, \"close\": close_time}\n return flask.jsonify(result=result)", "def hours(self):\n return int(self.minutes / 60)", "def serialize(self):\n return {\n 'id' : self.id,\n #had to change to 'title' for full calendar, might change\n 'title' : self.name,\n 'host' : self.created_by,\n 'start' : self.start_on.isoformat(),\n 'end' : self.end_on.isoformat(),\n 'description' : self.description,\n 'color' : 'blue',\n }", "def render_time(dt):\n return dt.strftime('%H:%M:%S')", "def json(self):\n\t\treturn datetime.now()", "def list(self, request):\n teams = self.controller.retrieve_all_work_times()\n serializer = data_serializers.PresentWorkTimeDataSerializer(teams, many=True)\n return Response(serializer.data)", "def _task_data(self):\n output = {\n 'all': [],\n 'open': [],\n 'open_hours': 0,\n 'done': [],\n 'done_hours': 0,\n 'week_done': [],\n 'week_done_hours': 0,\n 'week_due': [],\n 'week_due_hours': 0,\n 'velocity': [],\n 'velocity_hours': 0,\n 'velocity_count': 0,\n }\n\n last_sunday = SUNDAY - timedelta(weeks=1)\n three_weeks_ago = MONDAY - timedelta(weeks=4)\n\n tasks = Task.originals.owner_id(self.pk).order_by('due_dt')\n for t in tasks:\n output['all'].append(t)\n # process open tasks\n if not t.completed:\n output['open'].append(t)\n output['open_hours'] += t.task_time\n\n # Process done tasks\n else:\n output['done'].append(t)\n output['done_hours'] += t.task_time\n if t.completed_dt >= three_weeks_ago and t.completed_dt <= last_sunday:\n output['velocity'].append(t)\n output['velocity_hours'] += t.task_time\n\n if t.due_dt >= MONDAY and t.due_dt <= SUNDAY:\n output['week_due'].append(t)\n output['week_due_hours'] += t.task_time\n\n if t.completed and t.completed_dt >= MONDAY and t.completed_dt <= SUNDAY:\n output['week_done'].append(t)\n output['week_done_hours'] += t.task_time\n\n output['all_hours'] = output['open_hours'] + output['done_hours']\n\n # Extra calcs for the velocity\n output['velocity_count'] = len(output['velocity'])\n\n if output['velocity_hours'] > 0:\n output['velocity_hours'] = round(output['velocity_hours']/3,2)\n if output['velocity_count'] > 0:\n output['velocity_count'] = round(Decimal(output['velocity_count'])/3,2)\n\n return output", "def get_events_for_specific_hours(start,end):\n\tresults = session.query(\"event_name\",\"date\",\"start_time\",\"end_time\").\\\n\tfrom_statement(\"select event_name,date,start_time,end_time from event where date=curdate() and \\\n\t\tstart_time >= :starttime and end_time <= 
:endtime\").\\\n\tparams(starttime = start, endtime = end).all()\n\tif(len(results) > 0):\n\t\tret_dict = {}\n\t\tevents = []\n\n\t\tfor event_tuple in results:\n\t\t\ttemp = {}\n\t\t\ttemp['event_name'] = event_tuple[0]\n\t\t\ttemp['start_date'] = str(event_tuple[1])\n\t\t\ttemp['start_time'] = str(event_tuple[2])\n\t\t\ttemp['end_time'] = str(event_tuple[3])\n\t\t\tevents.append(temp)\n\n\t\tret_dict['events'] = events\n\t\treturn jsonify(ret_dict)\n\telse:\n\t\treturn \"{'events':'no results returned'}\"", "def json(self):\n return {'User_uuid': self.uuid, 'School_id': self.school_id, 'Earned_points': self.us_dollar}", "def json(self):\n beat = self.beat + 1.4 # replace with hjd\n w, h = self.getWidth(), self.getHeight()\n \n return {\n \"_time\": beat,\n \"_duration\": self.dur,\n #\"_lineIndex\": 0,\n #\"_type\": 0,\n #\"_width\": 0,\n \"_customData\": {\n # to undo the local rotation z transform we have to take trig parts of it and multiply them by the dimensions of the wall, then add them to the position\n \"_position\": [self.l + math.cos(math.radians(self.lrot[2] - 90)) * h / 2, self.d + math.sin(math.radians(self.lrot[2]-90)) * h / 2 + h / 2],\n \"_scale\": [w, h],\n \"_rotation\": self.rot,\n \"_localRotation\": self.lrot\n }\n }", "def hourly_table(self):\n htable = [0 for i in range(24)]\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n evtime = stime[3]\n htable[evtime] += 1\n return htable", "def hour(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"hour\")", "def hours_to_24h_time(hours: float) -> str:\n return (\n dt.datetime(2001, 1, 2) + dt.timedelta(hours=hours)\n ).time().isoformat()", "def index(req):\n secs = Section.objects.all().values()\n return Controller.render_json({'sections':list(secs), \"total\": len(secs)})", "def timesheet_all(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet_all.html'\r\n )", "def hour(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"hour\")", "def hour(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"hour\")", "def hour(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"hour\")", "def set_Hour(self, value):\n super(GetTimestampFromDateParametersInputSet, self)._set_input('Hour', value)", "def test_02_stats_hours(self):\r\n hour = unicode(datetime.datetime.utcnow().strftime('%H'))\r\n with self.flask_app.test_request_context('/'):\r\n hours, hours_anon, hours_auth, max_hours,\\\r\n max_hours_anon, max_hours_auth = stats.stats_hours(1)\r\n print hours\r\n for i in range(0, 24):\r\n # There should be only 10 answers at current hour\r\n if str(i).zfill(2) == hour:\r\n err_msg = \"At time %s there should be 10 answers\" \\\r\n \"but there are %s\" % (str(i).zfill(2),\r\n hours[str(i).zfill(2)])\r\n assert hours[str(i).zfill(2)] == 10, \"There should be 10 answers\"\r\n else:\r\n err_msg = \"At time %s there should be 0 answers\" \\\r\n \"but there are %s\" % (str(i).zfill(2),\r\n hours[str(i).zfill(2)])\r\n assert hours[str(i).zfill(2)] == 0, err_msg\r\n\r\n if str(i).zfill(2) == hour:\r\n tmp = (hours_anon[hour] + hours_auth[hour])\r\n assert tmp == 10, \"There should be 10 answers\"\r\n else:\r\n tmp = (hours_anon[str(i).zfill(2)] + hours_auth[str(i).zfill(2)])\r\n assert tmp == 0, \"There should be 0 answers\"\r\n err_msg = \"It should be 10, as all answers are submitted in the same hour\"\r\n tr = db.session.query(TaskRun).all()\r\n for t in tr:\r\n print t.finish_time\r\n assert max_hours == 10, err_msg\r\n 
assert (max_hours_anon + max_hours_auth) == 10, err_msg", "def view_events():\n result = get_events_helper(Event)\n return jsonify(result[0]), result[1]", "def reservetime_hour(self):\n return self._get_time_info([\"Reserve_Time_H\", \"reserveTimeHour\"])", "def __str__(self):\n return (\"\"\"\n{\n %s: {\n start_time: %s,\n stop_time: %s\n }\n}\"\"\" % (self._name, self._start_time, self._stop_time)\n )", "def get(self, request, pk, format=None):\n\n user = self.get_user(pk=pk)\n serializer = TimeReminderSerializer(user.time_reminders, many=True)\n \n if 'get_current' not in request.GET: #this means we just want all of the time reminders\n return Response(serializer.data)\n\n #else, we want to see if any time reminders should be going off\n time_to_display = []\n for element in serializer.data:\n reminder_time = element['time']\n curr_time = datetime.datetime.now()\n\n datetime_of_reminder = datetime.datetime.strptime(reminder_time, \"%Y-%m-%dT%H:%M:00Z\")\n test = datetime_of_reminder - curr_time #get time between event and now\n seconds = test.total_seconds()\n if (seconds < 35):\n time_to_display.append(element) #if less than 35 seconds in between, send out notification\n\n return Response(time_to_display)", "def info_event_json(event_id):\n event = Event.query.filter_by(id=event_id).first_or_404()\n timeuntil = timesince(event.countdown, until=True)\n return jsonify(event=event.data, timeuntil=timeuntil)", "def tradeHours(self, context):\n raise NotImplementedError", "def _get_hours_pro_entry(time_entries):\n events = []\n for event in time_entries:\n start_time = datetime.datetime(\n date.today().year,\n date.today().month,\n date.today().day,\n event.start_at.hour,\n event.start_at.minute,\n event.start_at.second,\n )\n end_time = datetime.datetime(\n date.today().year,\n date.today().month,\n date.today().day,\n event.finish_at.hour,\n event.finish_at.minute,\n event.finish_at.second,\n )\n\n timediff = end_time - start_time\n events.append(\n {\n \"worked_hours\": round(timediff.total_seconds() / 3600, DECIMALS_HOUR),\n \"event\": event,\n }\n )\n return events", "def get_zakopane_hourly_weather():\n zakopane = TwelveHoursWeatherForecast(location.get(\"zakopane\", \"\"))\n zakopane_weather_detail = zakopane.get_hourly_weather_details()\n zakopane_hourly_weather_detail = []\n for data in zakopane_weather_detail:\n zakopane_hourly_weather_detail.append(data)\n return zakopane_hourly_weather_detail", "def date_hour(date):\n return date.hour", "def export_to_json(self, hparams=None):\n the_dict = vars(self)\n the_dict.pop('TimeLimit.truncated', None)\n with open(f\"{self.folder}/{self.name}.json\", \"w\") as f:\n json.dump(the_dict, f)", "def _disp_times():\n fields = request.args.get('fields', type=str)\n format_type = request.args.get('format', type=str)\n top = request.args.get('top', type=int)\n token = request.args.get('token', type=str)\n results = {}\n\n result, length, code = retrieve(token, format_type, top, request_table[fields])\n return flask.jsonify(result=result, length=length, code=code)\n\n # elif code == 401: # Unauthorized\n # app.logger.debug(\"Token Expired! 
Let's log the user out.\")\n # return render_template('calc.html')", "def as_json(self):", "def get_time():\n return {\n 'timestamp': datetime.now()+ timedelta(hours=-1)\n }", "def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out", "def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out", "def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out", "def to_hours(self, timesteps, to_label=False):\n out = timesteps*self.dt/(60*60)\n if to_label:\n out = [ '{:.2f} hours'.format(el) for el in out ]\n return out", "def default(request):\n return direct_to_template(request,\n 'hours/app.html',\n {})", "def report_hour_distribution(self):\n self.histogram_granularities.add(histogram_granularity.HOUR)\n return self", "def get_hr_graph():\n signames_hr = queries['signames_hr']\n hrvariability = queries['hrvariability']\n return html.Div(style={'height': '30vh'}, className='hr', children=[dcc.Graph(\n id='hr-' + signame,\n style={'width': '100%'},\n figure={\n 'data': [\n {'x': np.array(hrvariability[signame])[:, 0],\n 'y': np.array(hrvariability[signame])[:, 1],\n 'mode': 'line', 'name': signame, 'line':{'color':'rgb(0,0,255)'}}\n ],\n 'layout': {\n 'font': {'color': '#fff'},\n 'title': signame + ' hr',\n 'xaxis': {'title': 'time', 'color': '#fff', 'showgrid': 'False'},\n 'yaxis': {'title': 'HR (beats/min', 'color': '#fff', 'showgrid': 'False'},\n 'paper_bgcolor': '#000', 'plot_bgcolor': '#000'\n }\n }) for signame in signames_hr])", "def proxy_hours_minutes(self):\n\n td = self.convert_last_col_filtered()\n resultat = td.days * 24 + td.seconds // 3600, (td.seconds // 60) % 60\n # print('{} H {} M'.format(*resultat))\n print(resultat)\n return resultat", "def for_json(self) -> str:\n return self.isoformat()", "def report_gpustat():\n\n def _date_handler(obj):\n if hasattr(obj, 'isoformat'):\n return obj.isoformat()\n else:\n raise TypeError(type(obj))\n\n response.content_type = 'application/json'\n if EXCLUDE_SELF:\n resp = {'error': 'Excluded self!'}\n else:\n resp = core.my_gpustat()\n return json.dumps(resp, default=_date_handler)", "def calendar_view(request, calendar_id):\n calendar_obj = Calendar.objects.get(pk=calendar_id)\n try:\n appointments = Appointment.objects.all().filter(calendar=calendar_obj)\n appointments = jsonify(appointments)\n except:\n appointments = []\n calendar_obj = calendar_obj.serialize()\n calendar_obj[\"non_working_days\"] = [day for day in [0, 1, 2, 3, 4, 5, 6] if day not in calendar_obj[\"working_days\"]]\n return render(request, 'calendar_view.html', {'calendar_obj': calendar_obj, 'appointments': appointments})", "def _task_data(self):\n output = {\n 'all': [],\n 'all_hours': 0,\n 'open': [],\n 'open_hours': 0,\n 'done': [],\n 'done_hours': 0,\n }\n\n tasks = Task.originals.project_id(self.pk).order_by('due_dt')\n for t in tasks:\n # process open tasks\n if not t.completed:\n output['open'].append(t)\n output['open_hours'] += t.task_time\n\n # Process done tasks\n else:\n output['done'].append(t)\n output['done_hours'] += t.task_time\n\n # Included in the loop to keep the ordering\n output['all'].append(t)\n\n output['all_hours'] = output['open_hours'] + output['done_hours']\n\n return output", "def h_12(self):\n return self._pmhour", "def get(self, 
request, *args, **kwargs):\n query = '''\n SELECT\n DATE(bs.\"CreatedAt\"),\n count(1)\n FROM\n blood_sample_bloodsample as bs\n WHERE now() - '36 hour'::interval > bs.\"CreatedAt\" AND \\\n bs.\"State\" in ('0','4')\n GROUP BY DATE(bs.\"CreatedAt\") order by DATE(bs.\"CreatedAt\")\n '''\n\n with connection.cursor() as cursor:\n cursor.execute(query)\n processed_not_ontime = [\n [row[0], row[1]]\n for row in cursor.fetchall() if row[1]\n ]\n\n return JsonResponse(\n {\n 'status': 200,\n 'processed_not_ontime': processed_not_ontime,\n 'processed_hours': settings.PROCESSING_HOURS,\n }\n )" ]
[ "0.6702672", "0.63559496", "0.6134469", "0.60062844", "0.58493066", "0.5845087", "0.5821502", "0.58081955", "0.57617134", "0.5753844", "0.5742273", "0.5733102", "0.57142144", "0.56817853", "0.56517273", "0.5609031", "0.558943", "0.5580721", "0.5514128", "0.549668", "0.5485034", "0.5459778", "0.5456188", "0.543028", "0.54280025", "0.5410326", "0.5409438", "0.5405698", "0.5384301", "0.5382816", "0.53643906", "0.5364096", "0.5350562", "0.5346681", "0.534165", "0.53281605", "0.5321181", "0.5315334", "0.53138715", "0.53118694", "0.529093", "0.5278395", "0.52764416", "0.5244283", "0.5243668", "0.5238784", "0.52286446", "0.5214764", "0.519391", "0.51796293", "0.51778084", "0.51778084", "0.5173355", "0.5165089", "0.5156235", "0.5146726", "0.5145232", "0.5117731", "0.51170164", "0.5115999", "0.5113561", "0.51118493", "0.5102339", "0.509763", "0.5090351", "0.50811523", "0.5078924", "0.5076933", "0.5074425", "0.5074425", "0.5074425", "0.5066255", "0.5065117", "0.50619555", "0.5061919", "0.5059843", "0.505833", "0.5053662", "0.5047068", "0.50425196", "0.5036175", "0.5029547", "0.49995652", "0.49919927", "0.4981201", "0.49804777", "0.49691486", "0.49691486", "0.49691486", "0.49691486", "0.49672708", "0.4960847", "0.49563646", "0.49507585", "0.493862", "0.49349773", "0.49343115", "0.4926809", "0.49228948", "0.49207303" ]
0.72261083
0
View for rendering events feed data as json.
def json_events(request):
    if request.method == 'GET':
        ttrss_url = request.GET['feed']

        # need xml for this.
        university_url = 'http://events.uchicago.edu/widgets/rss.php?key=47866f880d62a4f4517a44381f4a990d&id=48'

        n = datetime.datetime.now()
        return JsonResponse(
            {
                'events': flatten_events(get_events(university_url, ttrss_url, n, n + relativedelta(years=1), False))
            }
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def view_events():\n result = get_events_helper(Event)\n return jsonify(result[0]), result[1]", "def get():\n return jsonify({'events': 'Events API'}), 200", "def get_event():\n json_data = request.args or {}\n return make_response(jsonify({ \"data\" : Event.get_events(json_data)}))", "def to_json(self):\n self._load_all_events()\n return json.dumps(\n [x.to_dict() for events in self._events.values() for x in events], indent=2\n )", "def show_events_list():\r\n\tevents_list = Page.objects.filter(tags='events').order_by('-created')\r\n\treturn {'events_list': events_list}", "def list(self):\n return JSONResponse(self.request).data(items=self._get_agenda_items()).dump()", "def update_events(request):\n events_data = request.data\n events_manager.deserialize_event(events_data)\n # print(events_manager.serialize_events())\n events_manager.apply()\n return JsonResponse({'nodes': []})", "def show_events(request):\n event_list = Event.objects.order_by('-date')\n\n event_form = EventForm()\n\n context = {'events': event_list, 'form': event_form}\n return render(request, 'metro_app/events_view.html', context)", "def json_news(request):\n if request.method == 'GET':\n feed = request.GET['feed']\n return JsonResponse(\n {\n 'news': get_news(feed),\n }\n )", "def json(self):\n return {\n 'uri': self.view_uri,\n 'created': time.strftime('%c', time.gmtime(self.created)),\n 'created_timestamp': self.created,\n 'exception_type': str(self.exc_type),\n 'exception': str(self.exc_value),\n }", "def info_event_json(event_id):\n event = Event.query.filter_by(id=event_id).first_or_404()\n timeuntil = timesince(event.countdown, until=True)\n return jsonify(event=event.data, timeuntil=timeuntil)", "def events_to_json(events):\n result = {}\n index = 0\n for e in events:\n event = {}\n event['id'] = e.id\n event['name'] = e.name\n event['datetime'] = e.datetime\n event['fee'] = e.fee\n event['max_capacity'] = e.max_capacity\n event['min_capacity'] = e.min_capacity\n result['event'+str(index)] = event\n index += 1\n return result", "def json_view(self, recursive=False):\n\n context = self.context.aq_inner\n data = self.export(context, recursive=recursive)\n pretty = json.dumps(data, sort_keys=True, indent=4)\n self.request.response.setHeader(\"Content-type\", \"application/json\")\n return pretty", "def list(self, request):\n user = request.auth.user\n events = Event.objects.order_by('datetime')\n search_text = self.request.query_params.get('q', None)\n if search_text is not None:\n events = events.filter(\n Q(cost__icontains=search_text)\n )\n search_text = self.request.query_params.get('date', None)\n if search_text is not None:\n events = events.filter(\n Q(datetime__icontains=search_text)\n )\n for event in events:\n event.bookmarked = None\n try:\n Bookmark.objects.get(event=event, user=user)\n event.bookmarked = True\n except Bookmark.DoesNotExist:\n event.bookmarked = False\n # game = self.request.query_params.get('gameId', None)\n # if game is not None:\n # events = events.filter(game__id=game)\n serializer = EventSerializer(\n events, many=True, context={'request': request})\n return Response(serializer.data)", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'event_url' : self.event_url,\n 'event_thumbnail_url' : self.event_thumbnail_url,\n 'description' : self.description,\n 'ticket_price' : self.ticket_price,\n 'start_date' : str(self.start_date),\n 'featued' : self.featured\n }", "def get_events_json(self, query_string, **kwargs):\n\n response = 
self._search_events(query_string, output_format=\"json\", **kwargs)\n\n return response.text", "def list(request, template='events/list.html'):\n return render(request, template, {\n 'events': Event.objects.get_upcoming().order_by('start_date'),\n })", "def events(self) -> [redirect, HTMLBody]:\n\t\t# Get all events and split into 2 groups\n\t\teventsl, eventsr = prepare_events(get_events())\n\t\treturn render_template(\"events.jinja2\", eventsl=eventsl, eventsr=eventsr)", "def myevents(self, request, pk=None):\n user = request.auth.user\n myevents = user.events\n serializer = EventSerializer(\n myevents, many=True, context={'request': request})\n return Response(serializer.data)", "def userevent_list(request):\n if request.method == 'GET':\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT\n full_name,\n user_id,\n id,\n title,\n description,\n date,\n time,\n name\n FROM\n EVENTS_BY_USER\n \"\"\")\n\n dataset = db_cursor.fetchall()\n\n # Take the flat data from the database, and build the\n # following data structure for each gamer.\n #\n # {\n # 1: {\n # \"id\": 1,\n # \"full_name\": \"Admina Straytor\",\n # \"games\": [\n # {\n # \"id\": 1,\n # \"title\": \"Foo\",\n # \"maker\": \"Bar Games\",\n # \"skill_level\": 3,\n # \"number_of_players\": 4,\n # \"game_type_id\": 2\n # }\n # ]\n # }\n # }\n\n events_by_user = {}\n\n for row in dataset:\n uid = row['user_id']\n if uid in events_by_user:\n events_by_user[uid]['events'].append({\n \"id\": row['id'],\n \"title\": row['title'],\n \"description\": row['description'],\n \"date\": row['date'],\n \"time\": row['time'],\n \"game_name\": row[\"name\"]\n })\n else:\n events_by_user[uid] = {\n \"gamer_id\": uid,\n \"full_name\": row['full_name'],\n \"events\": [{\n \"id\": row['id'],\n \"title\": row['title'],\n \"description\": row['description'],\n \"date\": row['date'],\n \"time\": row['time'],\n \"game_name\": row[\"name\"]\n }]\n }\n\n events = events_by_user.values() \n\n template = 'users/list_with_events.html'\n context = {\n 'userevent_list': events\n }\n\n return render(request, template, context)", "def list_all_events(as_format='json'):\n eventlist = []\n for event in Event.query \\\n .filter_by(is_hidden=False, lock_resources=False) \\\n .order_by(Event.starts_at.desc()).all():\n eventlist.append(event.data)\n if as_format == 'json':\n return jsonify(events=eventlist)\n headers = {'Content-Disposition': 'attachment; filename=events.csv'}\n csvlist = gen_csv(eventlist)\n return Response(stream_with_context(csvlist),\n mimetype='text/csv',\n headers=headers)", "def json_frapp(request):\n from pv.settings import MEDIA_URL\n\n if request.GET.get('date') == None:\n start = datetime.combine(date.today(), time(0, 0))\n else:\n start = datetime.combine( datetime.strptime(request.GET.get('date'), '%Y-%m-%d').date(), time(0, 0))\n\n end = datetime.combine(start, time(23, 59))\n\n timeslots = TimeSlot.objects.filter(start__gte=start,start__lte=end).select_related('show').order_by('start')\n\n\n '''Generate categories object for output'''\n\n categories = Category.objects.all()\n categories_output = []\n\n for c in categories:\n c_entry = {\n 'id': c.id,\n 'color': c.color.replace('#', '').upper(),\n 'namedisplay': c.category,\n 'description': c.description\n }\n\n categories_output.append(c_entry)\n\n # Get all series for timeslots\n series = set()\n for ts in timeslots:\n series.add(ts.show)\n\n\n '''Generate series object for output'''\n\n 
series_output = []\n\n for s in series:\n metainfos = []\n metainfos.append({ 'key': 'ProduzentIn', 'value': ', '.join(ts.show.hosts.values_list('name', flat=True)) })\n metainfos.append({ 'key': 'E-Mail', 'value': ', '.join(ts.show.hosts.values_list('email', flat=True)) })\n\n image = '' if s.image.name == None or s.image.name == '' else str(get_current_site(request)) + MEDIA_URL + s.image.name\n url = '' if s.website == None or s.website == '' else s.website\n\n # Get active schedules for the given date\n # But include upcoming single timeslots (with rrule_id=1)\n schedules = Schedule.objects.filter( Q(show=s.id,is_repetition=False) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n schedules_repetition = Schedule.objects.filter( Q(show=s.id,is_repetition=True) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n broadcastinfos = ''\n\n if not schedules.exists():\n continue\n\n for schedule in schedules:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n if schedules_repetition.exists():\n broadcastinfos = broadcastinfos + 'Wiederholung jeweils:'\n for schedule in schedules_repetition:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n s_entry = {\n 'id': s.id,\n 'categoryid': s.category.values_list('id', flat=True)[0],\n 'color': s.category.values_list('color', flat=True)[0].replace('#', '').upper(),\n 'namedisplay': s.name,\n 'description': s.description,\n 'url': url,\n 'image': image,\n 'broadcastinfos': broadcastinfos,\n 'metainfos': metainfos\n }\n\n series_output.append(s_entry)\n\n\n '''Generate shows object for output'''\n\n shows_output = []\n\n for ts in timeslots:\n\n is_repetition = ' ' + _('REP') if ts.schedule.is_repetition is 1 else ''\n namedisplay = ts.show.name + is_repetition\n description = ts.show.description\n url = str(get_current_site(request)) + '/shows/' + ts.show.slug\n urlmp3 = ''\n\n # If there's a note to the timeslot use its title, description and url\n try:\n note = Note.objects.get(timeslot=ts.id)\n namedisplay = note.title + is_repetition\n description = note.content\n url = str(get_current_site(request)) + '/notes/' + note.slug\n urlmp3 = note.audio_url\n except ObjectDoesNotExist:\n pass\n\n ts_entry = {\n 'id': ts.id,\n 'seriesid': ts.show.id,\n 'datetimestart': ts.start.strftime('%d.%m.%Y %H:%M:%S'),\n 'datetimeend': ts.end.strftime('%d.%m.%Y %H:%M:%S'),\n 'namedisplay': namedisplay,\n 'description': description,\n 'url': url,\n 'urlmp3': urlmp3,\n }\n\n shows_output.append(ts_entry)\n\n output = {}\n output['categories'] = categories_output\n output['series'] = series_output\n output['shows'] = shows_output\n\n return HttpResponse(json.dumps(output, ensure_ascii=False).encode('utf8'),\n content_type=\"application/json; charset=utf-8\")", "def serialize(self):\n return {\n 'id' : self.id,\n #had to change to 'title' for full calendar, might change\n 'title' : self.name,\n 'host' : self.created_by,\n 'start' : self.start_on.isoformat(),\n 'end' : self.end_on.isoformat(),\n 'description' : self.description,\n 'color' : 'blue',\n }", "def event_activity_json(event_id):\n limit = request.args.get('limit') or 50\n q = request.args.get('q') or None\n if q and len(q) < 3:\n q = None\n return jsonify(activities=get_event_activities(event_id, limit, q))", "def data_json(request):\n json_data = []\n for resource in Resource.objects.all():\n record = {} \n record['title'] = resource.name\n 
record['description'] = resource.description\n record['keyword'] = resource.csw_keywords.split(',')\n record['modified'] = resource.last_updated\n record['publisher'] = resource.organization\n record['contactPoint'] = resource.metadata_contact\n record['mbox'] = resource.contact_email\n record['identifier'] = resource.csw_identifier\n if resource.is_published:\n record['accessLevel'] = 'public'\n else:\n record['accessLevel'] = 'non-public'\n\n json_data.append(record)\n\n return HttpResponse(json.dumps(json_data), 'application/json')", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(self.get_data(context))", "def apigw_event():\n with open(\"events/event.json\") as json_file:\n return json.load(json_file)", "def info_current_event_json():\n event = Event.query.filter_by(is_current=True).first() or \\\n Event.query.order_by(Event.id.desc()).first_or_404()\n timeuntil = timesince(event.countdown, until=True)\n return jsonify(event=event.data, timeuntil=timeuntil)", "def home_feeds(request):\n result = {}\n \n result['feeds'] = []\n\n u = request.user\n\n\n # get other people's feeds, filter by friends if in social group\n feeds = Feed.objects.exclude(actor=u).order_by('-timestamp')\n result['feeds'] = [ f.get_json(me=u, android=True) for f in feeds ]\n\n return JSONHttpResponse(result)", "def events_info(request):\n \n global input\n \n if request == 'event-based':\n client_neries = Client_neries()\n \n events = client_neries.getEvents(min_datetime=input['min_date'], \\\n max_datetime=input['max_date'], min_magnitude=input['min_mag'], \\\n max_magnitude=input['max_mag'], min_latitude=input['evlatmin'], \\\n max_latitude=input['evlatmax'], min_longitude=input['evlonmin'], \\\n max_longitude=input['evlonmax'], min_depth = input['min_depth'], \\\n max_depth=input['max_depth'], max_results=input['max_result'])\n \n for i in range(0, len(events)):\n events[i]['t1'] = events[i]['datetime'] - input['preset']\n events[i]['t2'] = events[i]['datetime'] + input['offset']\n \n elif request == 'continuous':\n m_date = UTCDateTime(input['min_date'])\n M_date = UTCDateTime(input['max_date'])\n \n t_cont = M_date - m_date\n \n events = []\n \n if t_cont > input['interval']:\n num_div = int(t_cont/input['interval'])\n t_res = t_cont - num_div*input['interval']\n \n for i in range(0, num_div):\n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + i*input['interval'], \\\n 't1': m_date + i*input['interval'],\\\n 't2': m_date + (i+1)*input['interval'] + 60.0,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n \n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i+1), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + (i+1)*input['interval'], \\\n 't1': m_date + (i+1)*input['interval'],\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n else:\n events.append({'author': 'NAN', 'event_id': 'continuous0', \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date, \\\n 't1': m_date,\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n\n return events", "def render_json(object):\r\n return HttpResponse(jsonify(object), content_type='application/json')", "def list(self, 
request):\n return HttpResponse(request.user.series_list, content_type=\"application/json\")", "def index():\n\n if request.method == 'POST':\n app.logger.info('Event: ' + str(request.get_json()))\n process_event(request.get_json())\n return ''\n else:\n return render_template('index.html')", "def _serialize_event_data_as_json(event_data):\n return json.dumps(event_data)", "def show_events():\n try:\n return render_template(\n 'events.html',\n events=get_events(),\n auth=is_organizer(get_user()),\n app_config=app.config\n )\n except RuntimeError as error:\n return str(error), 500", "def as_json(self):", "def events(self):\r\n return resources.Events(self)", "def vaccine_data_chart(request):\n\n print(request)\n labels = []\n data = []\n\n queryset = VaccineData.objects.filter(country=\"Australia\").order_by(\"date\")\n for entry in queryset:\n labels.append(entry.date)\n data.append(entry.doses_administered)\n\n return JsonResponse(\n data={\n \"country\": \"Australia Vaccine Doses Administered\",\n \"labels\": labels,\n \"data\": data,\n }\n )", "def EventToJSON(_object):\n return json.dumps(_object, default=jsonDefault)", "def get_all_feeds(request):\n feed = Feeds.objects.all()\n\n\n\n\n serializer = FeedsSerializer(feed,many=True)\n return JsonResponse({\"FeedList\":serializer.data,\"ResponseCode\": \"200\",\"ResponseMessage\":\"Successfully\"}, safe=False)", "def get_events():\n url = app.config['EVENTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_events(response.json())\n raise RuntimeError('Error in retrieving events.')", "def get_feed():\n return jsonify(dict({\n \"result\": mongo.get_hpfeed(),\n \"code\": 200\n }))", "def render_get(self, request):\n\n thing_event = get_thing_event(self._server, request)\n last_item = self._last_events.get(self._event_key(thing_event), None)\n payload = json.dumps(last_item).encode(\"utf-8\") if last_item else b\"\"\n response = aiocoap.Message(code=aiocoap.Code.CONTENT, payload=payload)\n response.opt.content_format = JSON_CONTENT_FORMAT\n\n raise tornado.gen.Return(response)", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(self.get_data(**context), **response_kwargs)", "def data_json(self, extra_context=None, publish=False):\n if not self.project.CREATE_JSON:\n # nothing to see here, but the right mimetype\n return jsonify()\n\n if not self.data:\n # this sets site.data by spreadsheet or gdoc\n self.get_context(publish)\n\n return jsonify(self.data)", "def event_activity_current_json():\n event = Event.query.filter_by(is_current=True).first()\n if not event:\n return jsonify(activities=[])\n return event_activity_json(event.id)", "def dumps(self):\n evn_data = {\"id\": self.id,\n \"name\": self.name,\n \"code\": [line.strip() for line in self.source_code]}\n\n return {\"event\": evn_data}", "def to_json(self):\n\n columns = list()\n points = list()\n\n if self._collection.type() == Event:\n columns += ['time']\n elif self._collection.type() == TimeRangeEvent:\n columns += ['timerange']\n elif self._collection.type() == IndexedEvent:\n columns += ['index']\n\n columns += self.columns()\n\n for i in self._collection.events():\n points.append(i.to_point(columns[1:]))\n\n cols_and_points = dict(\n columns=columns,\n points=points,\n )\n\n # fold in the rest of the payload\n cols_and_points.update(self._data)\n\n # Turn the index back into a string for the json representation.\n # The Index object can still be accessed via TimeSeries.index()\n if 'index' 
in cols_and_points and \\\n isinstance(cols_and_points.get('index'), Index):\n cols_and_points['index'] = cols_and_points.get('index').to_string()\n\n return cols_and_points", "def to_json(self):\n return json.dumps({\"data\": self._data.tolist(),\n \"header\": self._header.tolist(),\n \"dates\": self._dates.tolist()})", "def feed_entries(self):\n date_format = \"%Y-%m-%dT%H:%M:%SZ\"\n entries = self.mapper.list_entries(limit=10)\n if entries:\n updated = max([e.updated for e in entries]).strftime(date_format)\n else:\n updated = datetime.utcnow().strftime(date_format)\n return {\"entries\": entries, \"updated\": updated}", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def json(data):\n if isinstance(data, dict):\n data = ujson.encode(data)\n uid = str(uuid.uuid4())\n display(HTML('<div id=\"{0}\" style=\"height: 600px; width:100%;\"></div>'.format(uid)))\n display(Javascript(\"\"\"\n require([\"https://rawgit.com/caldwell/renderjson/master/renderjson.js\"], function() {\n document.getElementById('%s').appendChild(renderjson(%s))\n });\n \"\"\" % (uid, data)))", "def export_event(self):\n\n cal = Eve()\n cal.add('summary', str(self.categories))\n cal.add('description', self.label)\n cal.add('dtstart', vDatetime(self.start))\n cal.add('dtend', vDatetime(self.end))\n return cal.to_ical()", "def __json__(self, request=None):\n # start = self.start.isoformat() if self.start else None\n # end = self.end.isoformat() if self.end else None\n return dict(\n timeref_type=\"daterange\",\n interval=self.interval,\n start=self.start.isoformat(),\n end=self.end.isoformat(),\n )", "def render_to_json_response(self, context, **response_kwargs):\n return HttpResponse(\n self.convert_context_to_json(context),\n content_type='application/json',\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return HttpResponse(\n self.convert_context_to_json(context),\n content_type='application/json',\n **response_kwargs\n )", "def to_json(self):\n\t\tevents = [{\"name\": \"Killstreak\", \"value\": v, \"tick\": t} for v, t, _ in self.killstreaks]\n\t\tevents += [{\"name\": \"Bookmark\", \"value\": v, \"tick\": t} for v, t, _ in self.bookmarks]\n\t\treturn {\"events\": sorted(events, key = lambda e: int(e[\"tick\"]))}", "def to_json_dict(self):\n json_dict = {}\n json_dict['event_status'] = self.event_status\n json_dict['event_status_list'] = self.event_status_list\n json_dict['spaces'] = [s.get_id() for s in self.spaces]\n json_dict['characters'] = [c.get_id() for c in self.characters]\n json_dict['exits'] = [e.get_id() for e in self.exits]\n json_dict['items'] = [i.get_id() for i in self.items]\n return json_dict", "def view_event_list(request, **kwargs):\n #lu = get_common_lookup(request)\n lu = { 'page_title' : 'MCB Event Tweets'\\\n , 'IS_TWEET_EVENT_PAGE' : True\n , 'TWEET_SUCCESS' : kwargs.get('success_msg', False)\n }\n \n if not 
request.user.is_authenticated():\n return HttpResponse('not logged in')\n \n if not is_user_in_group(request, TWEET_GROUP_NAME):\n return HttpResponse('not in tweet group')\n \n upcoming_events = MCBTweetEvent.get_events_awaiting_approval()\n \n lu.update({ 'upcoming_events' : upcoming_events\\\n #, 'my_checked_codes' : get_previously_checked_expense_codes(request)\\\n })\n #\n return render_to_response('tweet/events/event_list.html', lu, context_instance=RequestContext(request))", "def get_event():\n\t#Get HTTP query args.\n\tno = request.args.get('no')\n\tfilename = request.args.get('filename')\n\tcollection = mongo.db[filename]\n\n\tjsonEncoder = hepmcio_json.HepMCJSONEncoder()\n\thepMCDecoder = hepmcio_json.HepMCJSONDecoder()\n\tjsonDecoder = json.JSONDecoder()\n\t#Everything below same as in the Visualiser view.\n\tevent = collection.find_one({\"type\":\"event\", \"no\":int(no)}, {\"_id\":False})\n\tparticleJson = collection.find({\"type\":\"particle\", \"event\":event[\"barcode\"]}, {\"_id\":False})\n\tparticles = []\n\tfor particle in particleJson:\n\t\tparticles.append(jsonEncoder.encode(particle))\n\tvertices = []\n\tvertexJson = collection.find({\"type\":\"vertex\", \"event\":event[\"barcode\"]}, {\"_id\":False})\n\tfor vertex in vertexJson:\n\t\tvertices.append(jsonEncoder.encode(vertex))\n\tevent = jsonEncoder.encode(event)\n\n\teventObject = hepmcio_json.EventJSONObject(event, particles, vertices)\n\t\n\tdecodedEvent = hepMCDecoder.decode(eventObject)\n\n\tPT_CUTOFF = 0.0\n\tintParticles = [particle for particle in decodedEvent.particles.values() if particle.status!=1 and \\\n\t\tparticle.mom[0]**2 + particle.mom[1]**2 > PT_CUTOFF**2]\n\t\n\tintParticleAncestors = reduce(operator.concat, [hepmcio.get_ancestors(particle)[:-1] for particle in intParticles])\n\n\tparticles = []\n\tfor particle in (intParticles + intParticleAncestors):\n\t\tparticles.append(jsonDecoder.decode(jsonEncoder.encode(particle)))\n\t\n\tvertices = list(map(jsonDecoder.decode, vertices))\n\t\n\treturn {\"particles\":jsonEncoder.encode(particles), \"vertices\":jsonEncoder.encode(vertices)}", "def get(self, request, *arg, **kwargs):\r\n response = {\r\n 'status' : False,\r\n 'response' : {},\r\n 'error' : {\r\n 'internal' : None,\r\n 'external' : None\r\n }\r\n }\r\n\r\n try:\r\n #list comprehencion querying a low values table for names\r\n events = [event.name for event in EventType.objects.filter(is_active=True)]\r\n\r\n events = self.event_processor(_events=events)\r\n\r\n response['status'] = True\r\n response['response'] = events\r\n\r\n\r\n except Exception as GetException:\r\n response['error']['internal'] = GetException.__str__()\r\n response['error']['external'] = \"Something went wrong\"\r\n \r\n return JsonResponse(response)", "def to_json(self):\n return {\n \"item_name\": self.item_name,\n \"summary\": self.summary,\n \"content\": self.content,\n \"date_published\": self.date_published,\n \"item_slug\": self.item_slug,\n \"category_name\": self.category_name,\n }", "def render(self, data):\n logging.info(\"render (start)\")\n\n seria = json.dumps(data, ensure_ascii=False, indent=4)\n logging.info(\"rendered %s characters (end)\" % len(seria))\n return seria", "def json(self):\n return {'id': self.id, 'name': self.name, 'description': self.description}", "def render_json(self, obj):\n self.response.content_type = \"application/json\"\n self.response.out.write(json.encode(obj))", "def all_bugs_chart(request):\n labels = []\n data = []\n\n queryset = Bug.objects.values('title', 
'id').order_by('-created').exclude(status='Resolved').annotate(\n bug_votes=Count('votes'))[:5]\n for entry in queryset:\n labels.append(entry['title'])\n data.append(entry['bug_votes'])\n\n return JsonResponse(data={\n 'labels': labels,\n 'data': data,\n })", "def events(self):\n return get_tsv(self.path, self.values, 'events.tsv')", "def json_format(data):\n return {\n 'Title': data[\"title\"],\n 'Publication date': data['pubDate'],\n 'News link': data['link'],\n 'Image link': data['media'],\n }", "def all_features_chart(request):\n labels = []\n data = []\n\n queryset = Feature.objects.values('title').order_by('-created').exclude(status='Implemented').annotate(\n feature_purchases=Sum('purchases'))[:5]\n for entry in queryset:\n labels.append(entry['title'])\n data.append(entry['feature_purchases'])\n\n return JsonResponse(data={\n 'labels': labels,\n 'data': data,\n })", "def getchart(request):\n data = ast.literal_eval(request.body.decode('utf-8'))\n data['user'] = request.user\n data = newchart(data)\n return JsonResponse(data)", "def get_context_data(self, **kwargs):\n context = super(HomeView, self).get_context_data(**kwargs)\n context['events'] = Events.objects.filter(published=True).all()\n return context", "def covid_data_chart(request):\n\n print(request)\n labels = []\n data = []\n\n queryset = CovidData.objects.filter(country=\"Australia\").order_by(\"date\")\n for entry in queryset:\n labels.append(entry.date)\n data.append(entry.confirmed)\n\n return JsonResponse(\n data={\n \"country\": \"Australia Covid Cases\",\n \"labels\": labels,\n \"data\": data,\n }\n )", "def get_events(self):\n\n url = '/v2.4/'+self.page_id+'/events'\n data = self.graph.request(url)\n\n while 'next' in data['paging'].keys():\n print data['paging']['next']\n data = self.graph.request(url, args={\n 'limit' : 100,\n 'after' : data['paging']['cursors']['after']\n })\n\n return data", "def json(self):\n return {\n 'author': self.author,\n 'email': self.email,\n 'display_email': self.display_email,\n 'title': self.title,\n 'trailer_path': self.trailer_path,\n 'date': self.date,\n 'link': self.link,\n '_id': self._id\n }", "def format_events(self):\n\n log.debug('formatting events ({}) for \"{}\"'.format(len(self.unformatted_events), self.school_year))\n for event in self.unformatted_events:\n name = event.module.get_text().replace('\\n', '') if event.module else 'Matière non définie'\n category = event.category.get_text() if event.category else 'Catégorie de cours non définie'\n starttime = event.starttime.get_text() if event.starttime else 'Heure de début de cours non définie'\n endtime = event.endtime.get_text() if event.endtime else 'Heure de début de cours non définie'\n room = event.room.item.get_text() if event.room and event.room.item else 'Aucune salle définie'\n group = event.group.item.get_text() if event.group and event.group.item else 'Classe entière'\n nday = int(event.day.get_text()) if event.day else None\n date_first_weekday = self.week_dates_mapping[event.rawweeks.get_text()] if event.rawweeks else None\n start = '{}-{}'.format(date_first_weekday, starttime)\n end = '{}-{}'.format(date_first_weekday, endtime)\n dtstart = datetime.datetime.strptime(start, '%d/%m/%Y-%H:%M') + datetime.timedelta(days=nday, hours=-2)\n dtend = datetime.datetime.strptime(end, '%d/%m/%Y-%H:%M') + datetime.timedelta(days=nday, hours=-2)\n\n start_date = dtstart.isoformat() + 'Z'\n end_date = dtend.isoformat() + 'Z'\n\n calendar_event = GoogleCalendarEvent(\n location=room,\n summary='({}) - {} - 
{}'.format(category, name, group),\n description=group,\n dtstart=start_date,\n dtend=end_date\n )\n self.formatted_events.append(calendar_event.json)", "def getLineData(self):\n # Each view must have exactly one DateRange object\n date_range = DateRange.objects.filter(foreign_key=self.id).first()\n assert(date_range is not None)\n\n begin, end = date_range.getBeginEnd()\n return {\n \"begin\" : begin.strftime(\"%Y%m%d\"),\n \"end\" : end.strftime(\"%Y%m%d\"),\n \"data_sets\" : [\n {\n \"label\" : filter_set.label,\n \"color\" : filter_set.color,\n \"data\" : [\n {\n \"cnt\": row[\"cnt\"],\n \"date\": row[\"date\"].strftime(\"%Y%m%d\")\n } for row in filter_set.getMessageCountPerDay()\n ],\n } for filter_set in self.filterset_set.all()\n ]\n }", "def listings(request, category1, category2, category3, page = 1):\n \n # Creating URL for request\n base_url = \"https://www.eventbriteapi.com/v3/events/search/\"\n token_component = \"token=BKKRDKVUVRC5WG4HAVLT\" #I had this token in my mail link\n category_component = \"categories=\" + category1 + ',' + category2 + ',' + category3\n page_component = \"page=\" + str(page)\n url_without_page = base_url + \"?\" + token_component + \"&\" + category_component\n url_complete = url_without_page + \"&\" + page_component\n \n # GET events from Eventbrite\n f = urllib2.urlopen(url_complete) \n json_string = f.read() \n parsed_json = json.loads(json_string) \n\n # Parse through JSON\n events = parsed_json['events']\n eventsList = []\n \n for i in events:\n eventsList.append(event_container())\n \n # Parse further through JSON\n eventsList[-1].name = i['name']['text']\n eventsList[-1].id = i['id']\n eventsList[-1].url = i['url']\n try:\n eventsList[-1].description = i['description']['text']\n except:\n eventsList[-1].description = \"No description available\"\n eventsList[-1].resource_uri = i['resource_uri']\n \n \n listings_url_base = '/topthree/listings/'+ category1 + '/' + category2 + '/' + category3 + '/'\n \n # Pagination\n \n \"\"\"\n Performing manual pagination instead of Django pagination \n because GET request for events pulls in paginated data already\n \"\"\"\n \n next_page = int(page) + 1\n next_page_url = listings_url_base + str(next_page) \n \n if int(page)>1:\n prev_page = int(page) - 1\n prev_page_url = listings_url_base + str(prev_page) \n\n else:\n prev_page = 0\n prev_page_url = \"#\"\n \n \n # Sending values to template\n \n template = loader.get_template('listings.html')\n\n context = RequestContext(request, {\n 'eventsList': eventsList,\n 'prev_page_url':prev_page_url,\n 'next_page_url':next_page_url,\n 'prev_page':prev_page,\n 'page':page,\n 'category1':category1,\n 'category2':category2,\n 'category3':category3,\n })\n \n return HttpResponse(template.render(context))", "def format(self, *args):\n\t\tweb.header('Content-Type', 'application/json; charset=utf-8')\n\t\treturn json.dumps(self.content)", "def schedule_content(request):\r\n\r\n stories = Story.objects.filter(organization=request.user.organization).exclude(archived=True)\r\n\r\n # data = {}\r\n # data['success'] = 1\r\n # data['result'] = []\r\n data = []\r\n\r\n for story in stories:\r\n # Facet Schedules\r\n for facet in story.facetstory.all():\r\n credit = {}\r\n for user in facet.credit.all():\r\n credit['id'] = []\r\n credit['id'].append(user.credit_name)\r\n credit['id'].append(user.get_absolute_url())\r\n editor = {}\r\n for user in facet.editor.all():\r\n editor['id'] = []\r\n editor['id'].append(user.credit_name)\r\n editor['id'].append(user.get_absolute_url())\r\n 
print credit\r\n if facet.due_edit:\r\n edit_event_dict = {}\r\n edit_event_dict['id'] = facet.id\r\n edit_event_dict['title'] = facet.name.encode('utf-8')\r\n edit_event_dict['description'] = facet.description.encode('utf-8')\r\n edit_event_dict['due-edit'] = time.mktime(facet.due_edit.timetuple())\r\n edit_event_dict['editor'] = facet.editor.credit_name\r\n edit_event_dict['credit'] = credit\r\n edit_event_dict['url'] = facet.get_absolute_url()\r\n edit_event_dict['start'] = time.mktime(facet.due_edit.timetuple()) * 1000\r\n edit_event_dict['end'] = (time.mktime(facet.due_edit.timetuple()) * 1000) + 60\r\n edit_event_dict['overlap'] = True\r\n edit_event_dict['allDay'] = False\r\n edit_event_dict['backgroundColor'] = '#00aced'\r\n edit_event_dict['textColor'] = '#fff'\r\n data.append(edit_event_dict)\r\n if facet.run_date:\r\n run_event_dict = {}\r\n run_event_dict['id'] = facet.id\r\n run_event_dict['title'] = facet.name.encode('utf-8')\r\n run_event_dict['description'] = facet.description.encode('utf-8')\r\n run_event_dict['due-edit'] = time.mktime(facet.due_edit.timetuple())\r\n run_event_dict['editor'] = facet.editor.credit_name\r\n run_event_dict['credit'] = credit\r\n run_event_dict['url'] = facet.get_absolute_url()\r\n run_event_dict['class'] = 'event_run'\r\n run_event_dict['start'] = time.mktime(facet.run_date.timetuple()) * 1000\r\n run_event_dict['end'] = (time.mktime(facet.run_date.timetuple()) * 1000) + 60\r\n run_event_dict['overlap'] = True\r\n run_event_dict['backgroundColor'] = '#5cb85c'\r\n run_event_dict['textColor'] = '#fff'\r\n data.append(run_event_dict)\r\n\r\n # print \"DATA: \", data\r\n\r\n return HttpResponse(json.dumps(data), content_type='application/json')", "def render_to_response(self, context, **response_kwargs):\n return JsonResponse(context)", "def showEvents(self, year, month, language, filterIDs=None, negFilterIDs=None):\n try:\n returnEvents = self.newsFeedModel.getEvents(year, month, language, filterIDs, negFilterIDs)\n if language != 'de' and len(returnEvents) == 0:\n returnEvents = self.newsFeedModel.getEvents(year, month, 'de', filterIDs, negFilterIDs)\n returnEventsJSON = self.newsFeedView.toJSONEvents(returnEvents, self.newsFeedModel.getEventCategoriesLastChanged())\n return returnEventsJSON\n except Exception as e:\n print(\"there was a problem while retrieving events\")\n raise e", "def get_data(self):\n return self.data.to_json()", "def to_json(self, format=None):\n if format == \"widget\":\n return {\n \"source\": self.source,\n \"target\": self.target,\n \"type\": self.type,\n \"time\": self.time,\n \"value\": self.link_width,\n \"title\": self.title,\n \"color\": self.color,\n \"opacity\": self.opacity,\n \"data\": self.data,\n }\n else:\n return {\n \"source\": self.source,\n \"target\": self.target,\n \"type\": self.type,\n \"title\": self.title,\n \"time\": self.time,\n \"link_width\": self.link_width,\n \"data\": self.data,\n \"style\": {\"color\": self.color, \"opacity\": self.opacity,},\n }", "def to_json(self):\n json = {\n \"url\": url_for(\n \"api.get_week\",\n year=self.year,\n week=self.week,\n ),\n \"year\": self.year,\n \"week\": self.week,\n \"text\": self.text or \"\",\n \"tags\": [tag.text for tag in self.tags],\n }\n return json", "def get(self):\r\n #\"SELECT * FROM DBEvent\"\r\n self.insertContent(\"<hr>&nbsp;&nbsp;&nbsp;Грядущие события:<br>\")\r\n event = self.event #db.GqlQuery(self.query) \r\n eventlist=''\r\n #self.checkSession(self.request.headers.get('Cookie'), False)\r\n found_events = False\r\n \r\n ec = 
DBEventCat()\r\n cats = ec.get_categories()\r\n \r\n for this_event in event:\r\n try:\r\n if not found_events: found_events = True\r\n if self.Session['access'] >= this_event.access or int(self.Session['userid']) == int(this_event.userid) or this_event.access <= 0:\r\n eventlist += '<a href=\"/event/'+str(this_event.key().id())+'\">'+this_event.name.encode(\"utf8\")+'</a>'\r\n users = db.GqlQuery(\"SELECT * FROM DBEventAnketa WHERE eventid = :eventid\",\r\n eventid = this_event.key().id())\r\n if self.Session['access'] >= 8 or int(self.Session['userid']) == int(this_event.userid): \r\n eventlist += '&nbsp;[ <i><a href=\"/event/info/' + str(this_event.key().id()) + '\">Участников зарегистрировано: ' + str(users.count()) + '</i></a> ]<br>'\r\n elif self.Session['access'] >= this_event.access:\r\n eventlist += '&nbsp;[ <i>Участников зарегистрировано: ' + str(users.count()) + '</i> ]<br>'\r\n except: continue\r\n if found_events:\r\n self.insertTemplate('tpl_event_add.html', { 'eventlist': eventlist, 'cats' : cats })\r\n else:\r\n self.insertContent(\"&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Пока мероприятий не запланировано!\")\r\n self.insertContent(\"<hr>&nbsp;&nbsp;&nbsp;Недавно прошедшие события:<br>\")\r\n \r\n eventlist = ''\r\n events = db.GqlQuery(\"SELECT * FROM DBEvent where date<:today order by date desc limit 10\", today = db.datetime.date.today())\r\n for this_event in events:\r\n if self.Session['access'] >= this_event.access or int(self.Session['userid']) == int(this_event.userid):\r\n eventlist += '<a href=\"/event/'+str(this_event.key().id())+'\">'+this_event.name.encode(\"utf8\")+'</a>'\r\n users = db.GqlQuery(\"SELECT * FROM DBEventAnketa WHERE eventid = :eventid\",\r\n eventid = this_event.key().id())\r\n if self.Session['access'] >= 8 or int(self.Session['userid']) == int(this_event.userid): \r\n eventlist += '&nbsp;[ <i><a href=\"/event/info/' + str(this_event.key().id()) + '\">Участников зарегестрировано: ' + str(users.count()) + '</i></a> ]<br>'\r\n elif self.Session['access'] >= this_event.access:\r\n eventlist += '&nbsp;[ <i>Участников зарегистрировано: ' + str(users.count()) + '</i> ]<br>'\r\n self.insertTemplate('tpl_event_add.html', { 'eventlist': eventlist })\r\n\r\n \r\n #self.drawPage()\r", "def serialized_data(self):\n return {\n 'id': self.id,\n 'start_time': str(self.start_time),\n 'venue_id': self.venue_id,\n 'venue_name': self.venue.name,\n 'venue_image_link': self.venue.image_link,\n 'artist_id': self.artist_id,\n 'artist_name': self.artist.name,\n 'artist_image_link': self.artist.image_link\n }", "def get_user_events_json_list(user_events):\n events = []\n for user_event in user_events:\n events.append(user_event.json())\n return events", "def get_chart_one(request):\r\n json_str = []\r\n \r\n usuarios = Usuario.objects.all()\r\n for usuario in usuarios:\r\n peticiones = Peticion.objects.filter(usuario=usuario)\r\n json_str.append({ \r\n 'name': u'%s %s' % (usuario.persona.nombre,\r\n usuario.persona.apellidos),\r\n 'data': len(peticiones)\r\n }) \r\n json_obj = json.dumps(json_str, sort_keys=True, indent=4)\r\n response = HttpResponse(json_obj, mimetype=\"application/json\") \r\n return response", "def send_event():\n range = request.args.get('range', '60')\n time = arrow.utcnow().replace(minutes=-int(range))\n data = Event.query.filter(Event.timestamp > time).order_by(Event.timestamp.desc()).all()\n return jsonify(results=[i.serialize for i in data])", "def index(self):\r\n shows = Shows.query.order_by(asc(Shows.ShowID)).all()\r\n content = jsonify({\r\n 
\"shows\": [{\r\n \"date\": get_iso_format(show.ShowDate),\r\n \"countryCode\": show.CountryCode,\r\n \"country\": show.Country,\r\n \"city\": show.City,\r\n \"venue\": show.Venue,\r\n \"setlist\": self.get_setlist(show.ShowID),\r\n \"otherBands\": self.get_other_bands(show.ShowID),\r\n \"people\": self.get_show_people(show.ShowID),\r\n } for show in shows]\r\n })\r\n\r\n return make_response(content, 200)", "def json_response(self, request, *args, **kwargs):\n\n return HttpResponse(self.construct_json(),\n content_type='application/json',\n mimetype='application/json', status=self.status)", "def events(self):\n self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'", "def events(self):\n self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'", "def calendar_view(request, calendar_id):\n calendar_obj = Calendar.objects.get(pk=calendar_id)\n try:\n appointments = Appointment.objects.all().filter(calendar=calendar_obj)\n appointments = jsonify(appointments)\n except:\n appointments = []\n calendar_obj = calendar_obj.serialize()\n calendar_obj[\"non_working_days\"] = [day for day in [0, 1, 2, 3, 4, 5, 6] if day not in calendar_obj[\"working_days\"]]\n return render(request, 'calendar_view.html', {'calendar_obj': calendar_obj, 'appointments': appointments})", "def view_meetups():\n\n return make_response(jsonify({\n \"status\": 200,\n \"data\": meetups.view_meetups()\n })), 200", "def tags_JSON(request):\n tags_as_json = serializers.serialize('json', Tag.objects.all())\n return HttpResponse(json.dumps(tags_as_json), content_type='json')" ]
[ "0.76628214", "0.67185247", "0.6591272", "0.6508852", "0.6443426", "0.62528884", "0.6215547", "0.6096349", "0.60427344", "0.604139", "0.5985696", "0.5972402", "0.5937236", "0.5932184", "0.59189314", "0.58994985", "0.58941627", "0.5889959", "0.58717847", "0.58609265", "0.5860657", "0.58587974", "0.5854207", "0.58268523", "0.5809858", "0.5797601", "0.5782626", "0.57593095", "0.57396215", "0.5730987", "0.5728105", "0.5720628", "0.5711635", "0.57098824", "0.56965226", "0.56944025", "0.56931734", "0.56814444", "0.56772304", "0.5675143", "0.56580865", "0.56381464", "0.56306446", "0.5622252", "0.561796", "0.56102", "0.56038415", "0.5595678", "0.55928624", "0.5588356", "0.55834657", "0.55834657", "0.55834657", "0.55834657", "0.55834657", "0.55779874", "0.55724764", "0.55564684", "0.555611", "0.555611", "0.5556058", "0.55436075", "0.55392617", "0.55330795", "0.5528677", "0.552349", "0.55162275", "0.550388", "0.55001837", "0.5494819", "0.54897916", "0.5478243", "0.54735506", "0.54713285", "0.54690045", "0.5462745", "0.54611033", "0.54578495", "0.5455085", "0.5428083", "0.54187113", "0.54183817", "0.5405203", "0.5403234", "0.5401993", "0.53958297", "0.5394985", "0.53947437", "0.53914154", "0.5384819", "0.53777975", "0.53663635", "0.53563696", "0.5355197", "0.5354023", "0.5346089", "0.5346089", "0.5332492", "0.5329287", "0.5328116" ]
0.778918
0
View for rendering news feed data as json.
def json_news(request):
    if request.method == 'GET':
        feed = request.GET['feed']
        return JsonResponse(
            {
                'news': get_news(feed),
            }
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def top_news():\n data = get_top_news()\n return jsonify(data)", "def news():\n\n # ensure parameters are present\n # geo = request.args.get(\"geo\")\n geo = '95060'\n if not geo:\n raise RuntimeError(\"missing geo\")\n\n # lookup articles and store them as JSON array\n article_list = lookup(geo)\n\n # TODO\n print(article_list)\n news = jsonify(article_list) \n print(news)\n # return render_template(\"index.html\")\n return article_list", "def json_events(request):\n if request.method == 'GET':\n ttrss_url = request.GET['feed']\n\n # need xml for this. \n university_url = 'http://events.uchicago.edu/widgets/rss.php?key=47866f880d62a4f4517a44381f4a990d&id=48'\n\n n = datetime.datetime.now()\n return JsonResponse(\n {\n 'events': flatten_events(get_events(university_url, ttrss_url, n, n + relativedelta(years=1), False))\n }\n )", "def json_format(data):\n return {\n 'Title': data[\"title\"],\n 'Publication date': data['pubDate'],\n 'News link': data['link'],\n 'Image link': data['media'],\n }", "def GET(self, *args):\n all_news= self.get_all_news()\n all_news.sort( key=lambda n : n['date'], reverse=True)\n if len(args):\n n_last=int(args[0])\n all_news = all_news[:n_last]\n\n return json.dumps(all_news)", "def home_feeds(request):\n result = {}\n \n result['feeds'] = []\n\n u = request.user\n\n\n # get other people's feeds, filter by friends if in social group\n feeds = Feed.objects.exclude(actor=u).order_by('-timestamp')\n result['feeds'] = [ f.get_json(me=u, android=True) for f in feeds ]\n\n return JSONHttpResponse(result)", "def show_news_list():\r\n\tnews_list = Page.objects.filter(tags='news').order_by('-created')\r\n\treturn {'news_list': news_list}", "def get_all_feeds(request):\n feed = Feeds.objects.all()\n\n\n\n\n serializer = FeedsSerializer(feed,many=True)\n return JsonResponse({\"FeedList\":serializer.data,\"ResponseCode\": \"200\",\"ResponseMessage\":\"Successfully\"}, safe=False)", "def newsfeed(request):\n article_list = Article.objects.order_by('published_date')\n context = {'article_list': article_list}\n return render(request, 'sacms/newsfeed.html', context)", "def to_json(self):\n return {\n \"item_name\": self.item_name,\n \"summary\": self.summary,\n \"content\": self.content,\n \"date_published\": self.date_published,\n \"item_slug\": self.item_slug,\n \"category_name\": self.category_name,\n }", "def get_feed():\n return jsonify(dict({\n \"result\": mongo.get_hpfeed(),\n \"code\": 200\n }))", "def json(self):\n if self.valid:\n return {\n 'articleID': self._id,\n 'ticker_symbol': self.ticker,\n 'published_date': self.pub_date,\n 'author_name': self.author,\n 'title': self.title,\n 'text': self.text,\n 'num_likes': 0,\n 'includes_symbols': self.includes\n }\n\n return {}", "def news(request):\n articles = News.objects.all()\n return render(request, 'news.html', {\"articles\": articles})", "async def news(self):\n url = f\"https://newsapi.org/v2/top-headlines?country=nz&apiKey={self.bot.news_api_key}\"\n async with ClientSession() as session:\n async with session.get(url) as response:\n r = await response.json()\n firstArticle = r[\"articles\"][0]\n nSource = firstArticle[\"source\"][\"name\"]\n nTitle = firstArticle[\"title\"]\n nTimestamp = firstArticle[\"publishedAt\"]\n embed = discord.Embed(\n title=f\"News Title: {nTitle}\", description=f\"News Source: {nSource}\"\n )\n embed.add_field(name=\"News Content\", value=firstArticle[\"description\"])\n embed.set_image(url=firstArticle[\"urlToImage\"])\n embed.set_footer(text=f\"News Timestamp: {nTimestamp}\")\n\n 
channel = self.bot.get_channel(self.bot.main_channel_id)\n await channel.send(embed=embed)", "def news_feed(request):\n\n all_friends = get_all_friends(request)\n news_feed = get_news_feed(request)\n user_profile = get_users_profile(request.user.id)\n\n context = {\n 'news_feed': news_feed,\n 'user_profile': user_profile,\n 'status_form': StatusForm,\n }\n\n return render(request, 'status/news_feed.html', context)", "def data_json(request):\n json_data = []\n for resource in Resource.objects.all():\n record = {} \n record['title'] = resource.name\n record['description'] = resource.description\n record['keyword'] = resource.csw_keywords.split(',')\n record['modified'] = resource.last_updated\n record['publisher'] = resource.organization\n record['contactPoint'] = resource.metadata_contact\n record['mbox'] = resource.contact_email\n record['identifier'] = resource.csw_identifier\n if resource.is_published:\n record['accessLevel'] = 'public'\n else:\n record['accessLevel'] = 'non-public'\n\n json_data.append(record)\n\n return HttpResponse(json.dumps(json_data), 'application/json')", "def render(self):\n with open(self.config[\"home.newssource\"]) as fp:\n stories = json.load(fp)\n\n filename = self.config[\"home.template\"]\n parts = dict(stories=stories)\n\n return self.render_template(filename, parts)", "def news_handle(news_json):\n #Sets all the variables to global space to be used later.\n global title_1, title_2, author_1, author_2, source_1, source_2, url_1_final, url_2_final\n #Handles data from API and assigns data to variables.\n news_data = news_json['articles']\n article_1 = news_data[0]\n article_2 = news_data[1]\n title_1 = article_1['title']\n author_1 = article_1['author']\n source_1 = article_1['source']['name']\n url_1 = article_1['url']\n url_1_final = '<a href=\"{}\">Read More</a>'.format(url_1)\n title_2 = article_2['title']\n author_2 = article_2['author']\n source_2 = article_2['source']['name']\n url_2 = article_2['url']\n url_2_final = '<a href=\"{}\">Read More</a>'.format(url_2)\n return", "def GET(self):\n web.header(\"Content-Type\",\"application/json; charset=utf-8\")\n\n data = web.input(module=\"module\", start=\"start\", num=\"num\", type=\"type\")\n module = data[\"module\"]\n start = data[\"start\"]\n num = data[\"num\"]\n type_ = data[\"type\"]\n\n module = (1 if module == \"module\" else module)\n start = (1 if start == \"start\" else start)\n num = (1 if num == \"num\" else num)\n\n news = api.get_news_fromDB(int(module), int(start), int(num))\n\n if type_ != \"html\":\n return json.dumps(news)\n else:\n web.header(\"Content-Type\",\"text/html; charset=utf-8\")\n html = \"\"\n for item in news:\n html = html + item[\"maindiv\"]\n return html", "def json(self):\n return {\n 'author': self.author,\n 'email': self.email,\n 'display_email': self.display_email,\n 'title': self.title,\n 'trailer_path': self.trailer_path,\n 'date': self.date,\n 'link': self.link,\n '_id': self._id\n }", "def tweet_list_view_pure_django(request, *args, **kwargs):\n\n qs = Tweet.objects.all()\n # tweets_list = [\n # {\"id\": x.id, \"content\": x.content, \"likes\": random.randint(0, 129)} for x in qs\n # ]\n\n # @Anyi use this line of code instead of the above line\n tweets_list = [x.serialize() for x in qs]\n\n data = {\"isUser\": False, \"tweet_list_response\": tweets_list}\n # @Anyi this is a new way of returning our data\n # instead of HttpResponse() or render()\n # and since we want to make our page as dynamic as possible\n return JsonResponse(data)", "def data_json(self, 
extra_context=None, publish=False):\n if not self.project.CREATE_JSON:\n # nothing to see here, but the right mimetype\n return jsonify()\n\n if not self.data:\n # this sets site.data by spreadsheet or gdoc\n self.get_context(publish)\n\n return jsonify(self.data)", "def get(self):\n\n return {\"message\": \"Welcome to the news API. \"}", "def json_posts_latest():\n posts = posts_base.order_by(Post.pubdate.desc())[:app.config['FEEDITEMS']]\n out = {'posts': []}\n for post_result in posts:\n post_dict = get_public_post_dict(post_result[0], post_result[2])\n out['posts'].append(post_dict)\n\n return jsonify(out)", "def display(request):\n data = {}\n about = About.objects.all()\n if about:\n data['success']=1\n data['message']=\"Comments available\"\n data['about']=[]\n for i in range(len(about)):\n data['about'].append(\n {'about':about[i].about,\n 'about_id':about[i].id,\n })\n return JsonResponse(data)\n else:\n data['success']=0\n data['message']='no about available'\n return JsonResponse(data)", "def newsList(request):\n\n news_count = New.objects.count() # Pocet vsech zaznamu novinek\n news_list = New.objects.all().order_by(\"date\") # Sort by date ... and only part of list\n # misto vsech zaznamu ziskat jen ty v intervalu start - stop -> API\n\n pictureOfWeek = PhotoOfWeek.objects.last()\n context = {'news_list': news_list, 'news_count': news_count, 'pictureOfWeek': pictureOfWeek}\n return render(request, 'news/newsList.html', context)", "def format(self, *args):\n\t\tweb.header('Content-Type', 'application/json; charset=utf-8')\n\t\treturn json.dumps(self.content)", "def get_recent_news_items():\n news_item_count = request.args.get('newsItemCount') or 3\n try:\n animal_news = AnimalNews.get_printable_news_items_all_animals(news_item_count)\n return jsonify(message=animal_news), 200\n except Exception as e:\n print(e)\n return jsonify(message='{}'.format(e)), 501", "def news_fetch(region,news_key):\n #Allows for customizable API key and weather location.\n url = (f\"http://newsapi.org/v2/top-headlines?country={region}&apiKey={news_key}\")\n #Gets API with requests and convert to .json\n news_api = requests.get(url)\n news_json = news_api.json()\n return news_json", "def news():\r\n with open('config.json', 'r') as cfile:\r\n config = json.load(cfile)\r\n news_api_key = config[\"news_api_key\"]\r\n response = requests.get(\"https://newsapi.org/v2/top-headlines?\"\r\n \"sources=bbc-news&apiKey=\" + news_api_key)\r\n resp_json = response.json()\r\n with open(\"news.json\", 'w') as file:\r\n json.dump(resp_json, file)\r\n file.close()", "def render_json(object):\r\n return HttpResponse(jsonify(object), content_type='application/json')", "def tweet_list_view(request, *args, **kwargs):\n\n objs = Tweet.objects.all()\n\n tweets_list = [{\"id\": obj.id, \"content\": obj.content} for obj in objs]\n\n data = {\n\n \"isUser\":False,\n\n \"tweets_list\": tweets_list\n }\n\n return JsonResponse(data)", "def newsfeed_en(request):\n article_list = Article.objects.order_by('published_date')\n context = {'article_list': article_list}\n return render(request, 'sacms/newsfeed_en.html', context)", "def get_news(request):\n return get_all_posts(request, PostType.NEWS)", "def get_feed(request):\n user = request.user\n\n # Get feed data and paginate it.\n feed = Post.objects.filter(community__communitymember__user=user).order_by(\n \"-created_at\"\n )\n\n # Check for filter phrase.\n phrase = request.GET.get(\"phrase\")\n if phrase is not None:\n if phrase != \"\" and not phrase.isspace():\n for term in 
phrase.split():\n feed = feed.filter(Q(title__icontains=term))\n else:\n feed = feed.none()\n\n return JsonResponse(\n json_paginator(request, feed, lambda d: d.serialize(request)),\n status=200,\n )", "def blog():\n return jsonify(\n {\n 'entries': [\n {\n 'title': 'A blog post about things',\n 'date': '7/3/12',\n 'id': 1,\n 'lead': \"\"\"once upon a time, there was a cool dude who did\n cool things. This is his story.\"\"\",\n 'body': \"More content for the blog post\",\n 'more_url': 'http://blog.tobywaite.net',\n },\n {\n 'title': 'Cool projects, ftw',\n 'date': '6/3/12',\n 'id': 2,\n 'lead': \"\"\"I did a really cool project once, this is all\n about it.\"\"\",\n 'body': \"More content for the blog post\",\n 'more_url': 'http://blog.tobywaite.net',\n },\n ]\n }\n )", "def __repr__(self):\r\n\r\n return f\"<News: id = {self.news_id}, title = {self.title} summary = {self.summary}>\"", "def select_news(self):\n data = self.soup.findAll('item')\n for item in data:\n news_data = dict()\n for tag in ['title', 'link']:\n news_data[tag] = item.find(tag).get_text()\n\n news_data['pubDate'] = parse(item.find('pubDate').get_text())\n media = item.find('media:content')\n\n if media:\n news_data['media'] = media.get('url')\n else:\n news_data['media'] = None\n\n yield news_data", "def news()->str:#return array[news desc,news link]\n event_log(\"retrieve news data....\",\"\")\n c = 0\n location = read_json(\"news_api\")[0]\n main_url = \"https://newsapi.org/v2/top-headlines?country=\"+location+\"&apiKey=\"+read_json(\"news_api\")[1]+\"\"#add a country selection optin via json\n page = requests.get(main_url).json()\n article = page[\"articles\"]\n news_result = []\n for data in article:\n news_result.append([data[\"title\"],str(data[\"url\"]).replace('\"',\" \")])#exctracts the wanted data from api\n if c == 5:#add this to json file so scalibility\n break\n c+=1\n return news_result", "def post_list_view(request, *args, **kwargs):\n qs = Post.objects.all()\n posts_list = [{\"id\": x.id, \"content\": x.content, \"likes\": random.randint(0, 120), \"reposts\": random.randint(0, 10)} for x in qs] #{\"id\": x.id, \"content\": x.content, \"likes\": random.randint(0, 120), \"reposts\": random.randint(0, 10) }/x.serialize()\n data = {\n \"isUser\": False,\n \"response\": posts_list\n }\n return JsonResponse(data) #, save=False", "def get(self):\n return GlobalNews.retrieve()", "def guidebook_news_feed(request):\n current_site = Site.objects.get_current()\n\n feed_title = 'DjangoCon US News Updates'\n feed_description = 'The latest updates and additions for DjangoCon US.'\n feed_mimetype = 'application/rss+xml'\n feed_template = 'pinax/blog/rss_feed.xml'\n\n blog_url = 'http://%s%s' % (current_site.domain, reverse('blog'))\n # feed_url = 'http://%s%s' % (current_site.domain, reverse(url_name, kwargs=kwargs))\n\n posts = Post.objects.published().exclude(title__endswith='Sponsor')\\\n .order_by('-published')\n\n if posts:\n feed_updated = posts[0].updated\n else:\n feed_updated = datetime(2009, 8, 1, 0, 0, 0)\n\n feed = render_to_string(feed_template, {\n # 'feed_id': feed_url,\n 'feed_title': feed_title,\n 'feed_description': feed_description,\n 'blog_url': blog_url,\n # 'feed_url': feed_url,\n 'feed_updated': feed_updated,\n 'entries': posts,\n 'current_site': current_site,\n })\n\n return HttpResponse(feed, content_type=feed_mimetype)", "def get_featured_articles(request):\n try:\n count = 1\n if 'count' in request.POST and int(request.POST['count']):\n count = int(request.POST['count'])\n\n newest_list = 
[]\n for article in Article.objects.order_by('-modified')[:count]:\n newest_list.append(article.dump_to_dict())\n\n popular_list = []\n for article in Article.objects.order_by('-views')[:count]:\n popular_list.append(article.dump_to_dict())\n\n return format_ajax_response(True, \"Featured articles retrieved successfully.\", {'newest': newest_list,'popular': popular_list})\n except Exception as ex:\n logger.error(\"Failed to get_featured_articles: %s\" % ex)\n return format_ajax_response(False, \"There was an error retrieving the featured articles.\")", "def json_view(self, recursive=False):\n\n context = self.context.aq_inner\n data = self.export(context, recursive=recursive)\n pretty = json.dumps(data, sort_keys=True, indent=4)\n self.request.response.setHeader(\"Content-type\", \"application/json\")\n return pretty", "def all_news(request):\n\n all_news = News.objects.all().order_by(\"-date_added\")\n context = {\n 'news': all_news,\n 'show_without_bag': True\n }\n return render(request, 'news/news.html', context)", "def get_articles():\n _, articles = base_query(db_session)\n return jsonify([p.serialize for p in articles])", "def news(self):\n\n # Get articles with search term, if available, from each News API source\n news_api_articles = pd.DataFrame()\n\n q = urllib.parse.quote(\" OR \".join(self.search_terms), safe='')\n\n response = requests.get(\"https://newsapi.org/v2/everything?q=\" + q + \"&from=\" + datetime.now().strftime(\n \"%Y-%m-%d\") + \"&sortBy=popularity&pageSize=100&apiKey=\" + self.__news_api_key)\n\n if response.status_code == 200:\n data = json.loads(response.text)\n\n source_articles = []\n\n for article in data['articles']:\n source_articles.append([article['title'],\n article['description'],\n article['url'],\n article['publishedAt']])\n\n source_articles = pd.DataFrame(source_articles, columns=['title', 'description', 'url', 'publishedAt'])\n news_api_articles = pd.concat([news_api_articles, source_articles])\n\n news_api_articles = news_api_articles.reset_index(drop='True')\n\n news_api_articles['publishedAt'] = news_api_articles['publishedAt'].apply(pd.to_datetime)\n\n news_api_articles = news_api_articles.fillna(' ')\n\n term_in_title = news_api_articles['title'].apply(self.any_term)\n\n news_api_articles = news_api_articles[term_in_title]\n\n if (len(news_api_articles) > 10):\n news_api_articles = news_api_articles[0:10]\n\n else:\n print(\"News API failed to return any items\")\n\n # Create shortened links using bitly if access token is provided\n if self.__bitly_access_token != '':\n\n bitly_urls = []\n\n for index, article in news_api_articles.iterrows():\n url = article['url']\n bitly_response = requests.get(\"https://api-ssl.bitly.com/v3/shorten\",\n params={'longUrl': url, 'access_token': self.__bitly_access_token})\n\n if bitly_response.status_code == 200:\n data = json.loads(bitly_response.text)\n bitly_urls.append(data['data']['url'])\n\n news_api_articles['url'] = bitly_urls\n\n # Store final list to TwitterBot object\n self.list = news_api_articles\n\n return", "def as_json(self):", "def list(self, request):\n return HttpResponse(request.user.series_list, content_type=\"application/json\")", "def storiesOnMap(request):\n requestDict = request.POST\n zoom = int(requestDict['zoom'])\n north = Decimal(requestDict['north'])\n south = Decimal(requestDict['south'])\n east = Decimal(requestDict['east'])\n west = Decimal(requestDict['west'])\n # The \"old\" parameter contains a commma-separated list of story ids that are already displayed by the browser\n 
currentlyDisplayedStoryIds = requestDict['old'].split(',')\n\n storiesToDisplay = models.Story.objects.filter(latitude__range=(south, north)).filter(longitude__range=(west, east)).order_by('-created')\n\n # Filter the amount of story markers to display based on the current zoom level\n if len(storiesToDisplay) >= (zoom-1)*5:\n storiesToDisplay = storiesToDisplay[(zoom-1)*5:zoom*5]\n\n response = []\n for story in storiesToDisplay:\n if str(story.id) not in currentlyDisplayedStoryIds:\n\n if story.contributor:\n\t\tcontribName = story.contributor.name\n else: \n contribName = None\n langName = story.language.name if story.language else None\n response.append({'id': story.id,\n 'title': story.title,\n 'summary': story.summary,\n 'link': story.link,\n 'has_flv':story.has_flv(),\n 'flv_id':story.flv_id(),\n 'media_type':story.media_type,\n 'imageRef': story.contributor.imageRef,\n 'contributor': contribName,\n 'language': langName,\n 'created': story.created.strftime('%d %B %Y'),\n 'lat': float(story.latitude),\n 'long': float(story.longitude)})\n return JsonResponse(response)", "def all_bugs_chart(request):\n labels = []\n data = []\n\n queryset = Bug.objects.values('title', 'id').order_by('-created').exclude(status='Resolved').annotate(\n bug_votes=Count('votes'))[:5]\n for entry in queryset:\n labels.append(entry['title'])\n data.append(entry['bug_votes'])\n\n return JsonResponse(data={\n 'labels': labels,\n 'data': data,\n })", "def NewsArticles():\n health_articles = get_articles('health')\n education_articles = get_articles('technology')\n return render_template('articles.html',health=health_articles, tech =education_articles)", "def main(feed=None):\n feed_processor = core.FeedProcessor()\n feed_processor(feed_type=feed)\n return feed_processor.feed_json", "def search_news(request):\n try:\n query_string = ''\n if request.GET['search_text'].strip() != '':\n query_string = '&title='+request.GET['search_text']\n response = requests.get(APIURL + '/articles/?format=json'+query_string)\n parser = json.loads(response.content)\n return render_to_response('article/search_result.html', {'articlelist':parser})\n except:\n raise Http404(\"Search Item error\")", "def weblinks_view(request):\n\n\trecords = WebLinks.objects.filter(user_id=request.user.id)\n\tjson_response = [dict(title=record.title, url=record.url, description=record.description) for record in records]\n\treturn JsonResponse(json_response, safe=False)", "def fetch_news(n):\n\n # This is the list we will use the pass back the news information.\n data = []\n\n # Get news stories from the MEN RSS feed.\n response = feedparser.parse('https://www.manchestereveningnews.co.uk/?service=rss')\n\n # Loop through the news items, and the pull out the data we need.\n for news in response.entries[:n]:\n data.append({\n 'headline': news.title,\n 'content': news.description,\n })\n\n return data", "def all_features_chart(request):\n labels = []\n data = []\n\n queryset = Feature.objects.values('title').order_by('-created').exclude(status='Implemented').annotate(\n feature_purchases=Sum('purchases'))[:5]\n for entry in queryset:\n labels.append(entry['title'])\n data.append(entry['feature_purchases'])\n\n return JsonResponse(data={\n 'labels': labels,\n 'data': data,\n })", "def news(self) -> List[News]:\n return self._news", "def run(self,dispatcher,tracker,domain): \n data=newsapi(\"us\")\n leng=len(data)\n for i in range(leng):\t\n gt = {\n \"attachment\": {\n \"type\": \"template\",\n \"payload\": {\n \"template_type\": \"generic\",\n 
\"elements\": [\n {\n \"title\": data['articles'][i]['title'],\n \"image_url\":data['articles'][i]['urlToImage'],\n \"subtitle\": data['articles'][i]['description'],\n \"buttons\": [\n {\n \"type\": \"web_url\",\n \"url\": data['articles'][i]['url'],\n \"title\": \"Read More\"\n },\n ]\n },\n ]\n }\n }\n } \n dispatcher.utter_custom_json(gt)\n return []", "def get(self, request):\n \n posts = post.objects.filter(unlisted=False,visibility=\"PUBLIC\")\n\n page = self.request.GET.get('page', 1)\n page_num = self.request.GET.get('size', 1000)\n \n paginator = custom()\n results = paginator.paginate_queryset(posts,request)\n serializer = postSerializer(results, many=True)\n #return JsonResponse(serializer.data,safe=False)\n #http://stackoverflow.com/questions/34798703/creating-utf-8-jsonresponse-in-django\n #return JsonResponse(json.dumps(serializer.data, ensure_ascii=False),\n #safe=False)\n\n\n return JsonResponse(OrderedDict([('count', paginator.count),\n ('current', page),\n ('next', paginator.get_next_link()),\n ('previous', paginator.get_previous_link()),\n ('size', page_num),\n ('posts', serializer.data)]))\n\n \n\n\n\n\n\n #data = JSONParser().parse(request)\n #serializer = postSerializer(data=data)\n #if serializer.is_valid():\n # serializer.save()\n # return JsonResponse(serializer.data, status=201)\n #return JsonResponse(serializer.errors, status=400)", "def json(self):\n return {'id': self.id, 'name': self.name, 'description': self.description}", "def articleList():\n articles = get_news(\n 5, since=news.YESTERDAY.strftime(\"%yyyy-%mm-%dd\"), query=\"covid\"\n )\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(\n ARTICLE,\n {\n \"title\": title_list,\n \"desc\": desc_list,\n \"url\": url_list,\n \"img\": image_list,\n \"sources\": source_list,\n },\n )\n return True", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(self.get_data(context))", "def index_json():\n\n response = views.get_feature_collection_metadata(config)\n\n return make_response(jsonify(response))", "def json_sluglist_latest():\n posts = posts_base.order_by(Post.pubdate.desc())[:app.config['FEEDITEMS']]\n out = {'posts': []}\n for post in posts:\n out['posts'].append([post[0].pubdate.strftime(app.config['POST_DATETIME_FORMAT']), post[0].slug])\n\n return jsonify(out)", "def getchart(request):\n data = ast.literal_eval(request.body.decode('utf-8'))\n data['user'] = request.user\n data = newchart(data)\n return JsonResponse(data)", "def Topic_index(request):\n \"\"\"return path,date of images of topic to javascript\"\"\"\n fileID=get_sorted_img_list()\n week_img_list = simplejson.dumps(fileID)\n #latest\n path=fileID[0][0]\n date=fileID[0][1]\n print date\n return render(request, 'WebPtt/Gossip_topic_model.html',{'pic_path':path,'week_img_list':week_img_list,'date':date})", "def get(self, request, *args, **kwargs):\n if self.request.is_ajax():\n data = self.get_context_data(**kwargs)\n return JsonResponse({\n 'has_more': data['has_more'],\n 'lastPublished': data['last_published'],\n 'content': render_to_string(\"partials/posts_list.html\", data, request=self.request)\n })\n\n return super().get(request, *args, **kwargs)", "def json(self):\n return {\n 'uri': self.view_uri,\n 'created': time.strftime('%c', time.gmtime(self.created)),\n 
'created_timestamp': self.created,\n 'exception_type': str(self.exc_type),\n 'exception': str(self.exc_value),\n }", "def print_json_content(self, rss_feed):\n\n self.print_if_verbose(\n f\"Method 'print_json_content' is working: \\n\"\n f\"RSS feed will be printed in JSON format: \\n\"\n )\n\n json_content = json.dumps(rss_feed, indent=3)\n print(json_content)\n\n self.print_if_verbose(f\"Method 'print_json_content' is finished. \\n\")\n\n return \"Content is printed in JSON format\"", "def api_get_post(request, post_id):\n\n post = get_object_or_404(Post, id=post_id)\n\n json = serializers.serialize(\"json\", [post], fields=(\n \"pub_time\", \"_text_rendered\", \"title\", \"text\", \"image\",\n \"image_width\", \"image_height\", \"replies\", \"tags\"\n ))\n\n return HttpResponse(content=json)", "def serialize(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'description': self.description,\n 'ranking': self.ranking,\n 'created_date': self.created_date,\n }", "def feed(self):\r\n return feed.Feed(self)", "def json_response(self, request, *args, **kwargs):\n\n return HttpResponse(self.construct_json(),\n content_type='application/json',\n mimetype='application/json', status=self.status)", "def news_view():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n edit_mode_string = request.args.get('edit', None)\n news_list = get_news_list()\n if edit_mode_string == 'true':\n edit_mode = True\n else:\n edit_mode = False\n if check_authentication(session_id, user_id) and is_admin_user(user_id):\n return render_template('news_manager.html', user=user_id, session_id=session_id, edit_mode=edit_mode,\n news_list=news_list)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def run(self,dispatcher,tracker,domain): \n data=newsapi(\"in\")\n leng=len(data)\n for i in range(leng):\t\n gt = {\n \"attachment\": {\n \"type\": \"template\",\n \"payload\": {\n \"template_type\": \"generic\",\n \"elements\": [\n {\n \"title\": data['articles'][i]['title'],\n \"image_url\":data['articles'][i]['urlToImage'],\n \"subtitle\": data['articles'][i]['description'],\n \"buttons\": [\n {\n \"type\": \"web_url\",\n \"url\": data['articles'][i]['url'],\n \"title\": \"Read More\"\n },\n ]\n },\n ]\n }\n }\n } \n dispatcher.utter_custom_json(gt)\n return []", "def get_headlines():\n country = request.args.get('country', type=str)\n if country is not None:\n data = te.getNews(country=country).dropna()\n return jsonify(data.to_dict(orient='records'))\n data = te.getNews()\n return jsonify(te.getNews().dropna().to_dict(orient='records'))", "def Export(self):\n\n current_time = datetime.datetime.now(tz.UTC)\n\n self.data = np.load(self.cache_path, allow_pickle=True)[()]\n\n news = []\n\n ID = self.data.keys()\n\n for id in ID:\n v = self.data[id]\n if 'address' in v and \"河南\" in v['address'] and v['valid'] == 1\\\n and current_time - parse(v['time']) < datetime.timedelta(hours=12):\n news.append({\"Time\": v['time'], \"address\": v['address'], \"location\": v['location'], \"post\": v['post'],\n \"link\": v[\"link\"]})\n\n with open(self.output_path, \"w\", encoding=\"utf-8\") as fp:\n json.dump(news, fp, ensure_ascii=False, indent=4)\n\n print(\"Export %d info\" % len(news))", "def index(self):\r\n shows = Shows.query.order_by(asc(Shows.ShowID)).all()\r\n content = jsonify({\r\n \"shows\": [{\r\n \"date\": 
get_iso_format(show.ShowDate),\r\n \"countryCode\": show.CountryCode,\r\n \"country\": show.Country,\r\n \"city\": show.City,\r\n \"venue\": show.Venue,\r\n \"setlist\": self.get_setlist(show.ShowID),\r\n \"otherBands\": self.get_other_bands(show.ShowID),\r\n \"people\": self.get_show_people(show.ShowID),\r\n } for show in shows]\r\n })\r\n\r\n return make_response(content, 200)", "def latest_content(request):\n latest_articles = Article.published_articles()[:5]\n latest_comments = Comment.objects.all().order_by('-pub_date')[:5]\n tags = Tag.objects.annotate(num_articles=Count('article')).order_by(\n '-num_articles')\n contributors = Contributor.objects.annotate(\n num_articles=Count('article')).order_by('-num_articles')\n return {'latest_articles': latest_articles,\n 'latest_comments': latest_comments,\n 'tags': tags,\n 'contributors': contributors,\n }", "def get_remote_news_items(self):\n items = []\n params = {\n \"base_url\": self.osha_json_url,\n \"lang\": api.portal.get_tool(\"portal_languages\").getPreferredLanguage(),\n \"query_tags\": self.remote_news_query_tags,\n }\n qurl = \"{base_url}/{lang}/services/hw/news/{query_tags}\".format(**params)\n result = urlopen(qurl)\n if result.code == 200:\n json = load(result)\n for node in json.get(\"nodes\"):\n item = node.get(\"node\")\n pd = item.get('publication_date', '')\n items.append({\n 'remote_item': True,\n 'Title': item['title'],\n 'Date': (\n pd and DateTime(pd, datefmt=\"international\").strftime(\n \"%Y/%m/%d %H:%M\") or \"\"),\n 'getURL': item.get('path'),\n 'path': item.get('path'),\n 'Description': item.get('summary', '') or item.get('body', ''),\n 'text': item.get('summary', '') and item.get('body', '') or '',\n 'remote_image': item.get('image', ''),\n 'node_id': item.get('nid'),\n })\n return items", "def to_json(self):\n return {\n \"category_name\": self.category_name,\n \"summary\": self.summary,\n \"image\": self.image,\n \"category_slug\": self.category_slug,\n }", "def get_chart_one(request):\r\n json_str = []\r\n \r\n usuarios = Usuario.objects.all()\r\n for usuario in usuarios:\r\n peticiones = Peticion.objects.filter(usuario=usuario)\r\n json_str.append({ \r\n 'name': u'%s %s' % (usuario.persona.nombre,\r\n usuario.persona.apellidos),\r\n 'data': len(peticiones)\r\n }) \r\n json_obj = json.dumps(json_str, sort_keys=True, indent=4)\r\n response = HttpResponse(json_obj, mimetype=\"application/json\") \r\n return response", "def projects_JSON(request):\n projects = Project.objects.all()\n for p in projects:\n p.description = bleach.clean(markdown.markdown(p.description, extensions=['markdown.extensions.fenced_code']), strip=True)\n projects_as_json = serializers.serialize(\n 'json',\n projects,\n fields=('title',\n 'posted',\n 'difficulty',\n 'tags',\n 'articles',\n 'user',\n 'description',\n 'pk'),\n use_natural_foreign_keys=True)\n return HttpResponse(json.dumps(projects_as_json), content_type='json')", "def run(self,dispatcher,tracker,domain): \n data=newsapi(\"au\")\n leng=len(data)\n for i in range(leng):\t\n gt = {\n \"attachment\": {\n \"type\": \"template\",\n \"payload\": {\n \"template_type\": \"generic\",\n \"elements\": [\n {\n \"title\": data['articles'][i]['title'],\n \"image_url\":data['articles'][i]['urlToImage'],\n \"subtitle\": data['articles'][i]['description'],\n \"buttons\": [\n {\n \"type\": \"web_url\",\n \"url\": data['articles'][i]['url'],\n \"title\": \"Read More\"\n },\n ]\n },\n ]\n }\n }\n } \n dispatcher.utter_custom_json(gt)\n return []", "def json_frapp(request):\n from pv.settings 
import MEDIA_URL\n\n if request.GET.get('date') == None:\n start = datetime.combine(date.today(), time(0, 0))\n else:\n start = datetime.combine( datetime.strptime(request.GET.get('date'), '%Y-%m-%d').date(), time(0, 0))\n\n end = datetime.combine(start, time(23, 59))\n\n timeslots = TimeSlot.objects.filter(start__gte=start,start__lte=end).select_related('show').order_by('start')\n\n\n '''Generate categories object for output'''\n\n categories = Category.objects.all()\n categories_output = []\n\n for c in categories:\n c_entry = {\n 'id': c.id,\n 'color': c.color.replace('#', '').upper(),\n 'namedisplay': c.category,\n 'description': c.description\n }\n\n categories_output.append(c_entry)\n\n # Get all series for timeslots\n series = set()\n for ts in timeslots:\n series.add(ts.show)\n\n\n '''Generate series object for output'''\n\n series_output = []\n\n for s in series:\n metainfos = []\n metainfos.append({ 'key': 'ProduzentIn', 'value': ', '.join(ts.show.hosts.values_list('name', flat=True)) })\n metainfos.append({ 'key': 'E-Mail', 'value': ', '.join(ts.show.hosts.values_list('email', flat=True)) })\n\n image = '' if s.image.name == None or s.image.name == '' else str(get_current_site(request)) + MEDIA_URL + s.image.name\n url = '' if s.website == None or s.website == '' else s.website\n\n # Get active schedules for the given date\n # But include upcoming single timeslots (with rrule_id=1)\n schedules = Schedule.objects.filter( Q(show=s.id,is_repetition=False) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n schedules_repetition = Schedule.objects.filter( Q(show=s.id,is_repetition=True) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n broadcastinfos = ''\n\n if not schedules.exists():\n continue\n\n for schedule in schedules:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n if schedules_repetition.exists():\n broadcastinfos = broadcastinfos + 'Wiederholung jeweils:'\n for schedule in schedules_repetition:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n s_entry = {\n 'id': s.id,\n 'categoryid': s.category.values_list('id', flat=True)[0],\n 'color': s.category.values_list('color', flat=True)[0].replace('#', '').upper(),\n 'namedisplay': s.name,\n 'description': s.description,\n 'url': url,\n 'image': image,\n 'broadcastinfos': broadcastinfos,\n 'metainfos': metainfos\n }\n\n series_output.append(s_entry)\n\n\n '''Generate shows object for output'''\n\n shows_output = []\n\n for ts in timeslots:\n\n is_repetition = ' ' + _('REP') if ts.schedule.is_repetition is 1 else ''\n namedisplay = ts.show.name + is_repetition\n description = ts.show.description\n url = str(get_current_site(request)) + '/shows/' + ts.show.slug\n urlmp3 = ''\n\n # If there's a note to the timeslot use its title, description and url\n try:\n note = Note.objects.get(timeslot=ts.id)\n namedisplay = note.title + is_repetition\n description = note.content\n url = str(get_current_site(request)) + '/notes/' + note.slug\n urlmp3 = note.audio_url\n except ObjectDoesNotExist:\n pass\n\n ts_entry = {\n 'id': ts.id,\n 'seriesid': ts.show.id,\n 'datetimestart': ts.start.strftime('%d.%m.%Y %H:%M:%S'),\n 'datetimeend': ts.end.strftime('%d.%m.%Y %H:%M:%S'),\n 'namedisplay': namedisplay,\n 'description': description,\n 'url': url,\n 'urlmp3': urlmp3,\n }\n\n shows_output.append(ts_entry)\n\n output = {}\n output['categories'] = categories_output\n 
output['series'] = series_output\n output['shows'] = shows_output\n\n return HttpResponse(json.dumps(output, ensure_ascii=False).encode('utf8'),\n content_type=\"application/json; charset=utf-8\")", "def get(self, request):\n query = Article.objects.exclude(in_menu=True)\n\n article_filter = Q()\n searchable_paths = {}\n\n search_terms = [term for term in request.GET.get(\"query\").split(\" \")]\n for term in search_terms:\n searchable_paths[term] = Q(headline__icontains=term) | Q(\n teaser__icontains=term\n )\n\n for key in searchable_paths.keys():\n article_filter &= searchable_paths[key]\n\n query = query.filter(article_filter).distinct()\n articles = [\n {\n \"updated\": article.updated,\n \"headline\": article.headline,\n \"teaser\": article.teaser,\n \"tags\": [tag.name for tag in article.tags.all()],\n \"url\": reverse(\n \"article.detail\",\n kwargs={\n \"year\": article.created.year,\n \"month\": article.created.month,\n \"slug\": article.slug,\n },\n ),\n }\n for article in query\n ]\n\n return JsonResponse({\"articles\": articles})", "def json(data):\n if isinstance(data, dict):\n data = ujson.encode(data)\n uid = str(uuid.uuid4())\n display(HTML('<div id=\"{0}\" style=\"height: 600px; width:100%;\"></div>'.format(uid)))\n display(Javascript(\"\"\"\n require([\"https://rawgit.com/caldwell/renderjson/master/renderjson.js\"], function() {\n document.getElementById('%s').appendChild(renderjson(%s))\n });\n \"\"\" % (uid, data)))", "def json(self, **kwargs):\n return json.loads(self.content, **kwargs)", "def as_dict(self):\r\n return {\r\n \"text\": {\r\n \"title\": self.title_text,\r\n \"body\": self.body_text,\r\n },\r\n \"html\": {\r\n \"title\": self.title,\r\n \"body\": self.body,\r\n },\r\n \"seen\": self.seen,\r\n \"sent_at\": self.sent_at,\r\n }", "def posts_get():\n \n\n # Get and filter the posts from the database\n songs = session.query(models.Song).all()\n \n # Convert the posts to JSON and return a response\n data = json.dumps([song.as_dictionary() for song in songs])\n return Response(data, 200, mimetype=\"application/json\")", "def make_feed_dict(self, data):\r\n raise NotImplementedError", "def index(request):\n\n\t#define variables\n\targs = {}\n\tall_teams = []\n\tall_news = []\n\t\n\t#getting data form models\n\tleagues = League.objects.all()\n\t\n\t#in this loop we get all teams in our all leagues to a 'all_teams' list \n\tfor league in leagues:\n\t\tall_teams += league.team_set.all()\n\n\t#in this loop we get all new form every team which is in 'all_teams' list and place that new into 'all_news' list \n\tfor team in all_teams:\n\t\tall_news += team.news_set.all()\n\n\t#sort news by date so that last news pop up first\n\tall_news = reversed(sorted(all_news, key=lambda ne:ne.date))\n\t#taking forst 50 news of all news\n\tall_news = list(all_news)[:50]\n\t#context data initialization into dictionary 'args'\n\targs['news'] = all_news\n\targs['leagues'] = leagues\n\n\treturn render_to_response('news/index.html',args)", "def articles():\n \n # Parse through RSS feed of Get Rich Slowly\n feed = feedparser.parse(\"http://www.getrichslowly.org/blog/feed/\")\n \n # Get current username\n username = get_user()\n \n return render_template(\"articles.html\", username=username, feed=feed)", "def get_data(self):\n return self.data.to_json()", "def showItemsJSON():\n items = db.getAllItems()\n return jsonify(CategoryItems=[i.serialize for i in items])", "def to_dict(self):\n return {\n 'title': self.title,\n 'description': self.description,\n 'dec_description': 
self.dec_description,\n 'link': self.link,\n 'pubDate': self.pubDate.strftime(\"%a, %d %b %Y %H:%M\"),\n 'media': json.loads(self.media),\n 'source': self.source.url,\n 'links': json.loads(self.links),\n 'dec_links': json.loads(self.dec_links),\n }", "def index(self):\n \n return self.view.render('index.html', {\"posts\"=posts})", "def get_big_feed():\n\n # A list of news items. We need to preserve it in order to create it once\n # during the first call, and to\n # return these items from this already existing list\n big_feed_gen = None\n\n def nested_return_feed():\n nonlocal big_feed_gen\n\n if big_feed_gen:\n try:\n return json.dumps(next(big_feed_gen), indent=4, sort_keys=True,\n separators=(',', ': '), ensure_ascii=False)\n except StopIteration:\n return '', 204\n else:\n big_feed_gen = get_feed_generator()\n return json.dumps(next(big_feed_gen), indent=4, sort_keys=True,\n separators=(',', ': '), ensure_ascii=False)\n return nested_return_feed", "def feed(self):\n return feed.Feed(self)" ]
[ "0.673006", "0.66394603", "0.6454153", "0.6373204", "0.6363174", "0.63411576", "0.6298449", "0.61722803", "0.61526084", "0.60082966", "0.59839356", "0.5969505", "0.5918859", "0.5916364", "0.5914569", "0.5899843", "0.58803356", "0.5869351", "0.58558553", "0.5835319", "0.58160734", "0.5789556", "0.578666", "0.5769338", "0.57673436", "0.575199", "0.57514006", "0.5728741", "0.5715693", "0.57057583", "0.5700944", "0.56653327", "0.56580675", "0.5637963", "0.56135064", "0.5610844", "0.5608263", "0.55990064", "0.55826885", "0.5582132", "0.55744374", "0.55644864", "0.5562998", "0.5554155", "0.5544765", "0.5528018", "0.5527508", "0.5527318", "0.55093616", "0.5498074", "0.5492856", "0.548424", "0.547956", "0.547839", "0.54766935", "0.546302", "0.5453283", "0.54493934", "0.54376054", "0.5421959", "0.54196537", "0.54152215", "0.54131967", "0.5412921", "0.5412663", "0.54109", "0.5410872", "0.54093087", "0.54083955", "0.5405362", "0.54034257", "0.54026306", "0.54010636", "0.5396703", "0.53824735", "0.5367753", "0.5364813", "0.5353915", "0.53502494", "0.5347915", "0.53455484", "0.5335862", "0.5330128", "0.5324862", "0.532466", "0.53071433", "0.5301614", "0.52930593", "0.5290631", "0.5287332", "0.5286725", "0.52773803", "0.5272741", "0.52688426", "0.526833", "0.5262902", "0.52608186", "0.52591294", "0.52547234", "0.52532595" ]
0.78444064
0
View for retrieving the chat status for Ask a Librarian pages. Returns JSON.
def chat_status(request): if request.method == 'GET': ask_name = request.GET['name'] status = get_chat_status_and_css(ask_name) return JsonResponse( { 'chat_status': status[0], 'chat_css': status[1], } )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chat_status(request):\n team = Hunt.objects.get(is_current_hunt=True).team_from_user(request.user)\n if request.method == 'GET' and request.is_ajax():\n if(team is None):\n return render(request, 'access_error.html', {'reason': \"team\"})\n status = team.num_waiting_messages\n return HttpResponse(json.dumps({\"num_messages\": status}))\n else:\n return HttpResponseNotFound()", "def get_conversation(request):\n collected_values = {}\n\n # Only allow GET requests for this endpoint\n if request.method != 'GET':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n\n # Extract and form params\n uid = request.GET['uid']\n oid = request.GET['oid']\n token = request.GET['token']\n ts_query = request.GET['ts']\n time_user_seen = request.GET.get('tus')\n limit = int(request.GET['limit'])\n\n if ts_query == \"\":\n ts_query = timezone.now()\n\n change_user_seen = False\n if time_user_seen == \"true\":\n change_user_seen = True\n\n # Check if token is valid\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n # Collect all messages sent by two users in question listed by created at time\n message_query_set = Messages.objects.filter(\n Q(user_id=uid, other_id=oid) |\n Q(other_id=uid, user_id=oid)).order_by('-created_at')[:limit]\n\n # Collect all messages from query\n test_list = []\n for message in message_query_set:\n if change_user_seen:\n message.time_user_seen = timezone.now()\n message.save()\n test_list.append(message.get_map())\n\n # Collect return values\n collected_values[\"messages\"] = test_list\n collected_values[\"success\"] = True\n\n LOGGER.info(\"Get Conversation Result: %s\", collected_values)\n return JsonResponse(collected_values, status=200)", "def load_messages(request):\n thread = models.MessageThread.objects.get(hash_id=request.GET['id'])\n \n # check if user is a part of this chat\n if not request.user in thread.clients.all():\n return HttpResponse(status=403)\n\n # query for messages filter\n q = [Q(thread=thread)]\n if 'before' in request.GET:\n q.append(Q(date__lt=int(request.GET['before'])))\n\n # query messages matching filter\n messages = models.Message.objects.filter(*q).order_by('-id')\n messages_data = serializers.MessageListSerializer(messages[:30]).data\n\n # mark any unread messages in chat as read\n thread.mark_read(request.user)\n return JsonResponse({\"messages\":messages_data,\"end\":messages.count() <= 30})", "def get_conversation_list(request):\n collected_values = {}\n\n # Only accept GET requests for this endpoint\n if request.method != 'GET':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n\n # Extract params\n uid = request.GET['uid']\n token = request.GET['token']\n limit = int(request.GET['limit']) # Force a limiter to see how many users to get\n\n # Check if the token is valid\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n # Maybe cache or find better way of getting most recent id's messaged\n # Do a walkthrough of all messages and count totals\n # Potential Improvement 
is to keep a mapping of all messages sent from users to users\n users = {}\n msg_sent = Messages.objects.filter(user_id=uid).order_by('-created_at')[:limit]\n msg_recieved = Messages.objects.filter(other_id=uid).order_by('-created_at')[:limit]\n for msg in msg_sent:\n if users.get(msg.other_id) is None:\n users[msg.other_id] = 1\n else:\n users[msg.other_id] += 1\n for msg in msg_recieved:\n if users.get(msg.user_id) is None:\n users[msg.user_id] = 1\n else:\n users[msg.user_id] += 1\n\n # Collect return values\n collected_values[\"users\"] = users\n collected_values[\"success\"] = True\n\n LOGGER.info(\"Get Conversation List Result: %s\", collected_values)\n return JsonResponse(collected_values, status=200)", "def load_inbox(request):\n threads = models.MessageThread.objects.filter(clients=request.user).annotate(\n unread_count=Count('receipts',filter=Q(receipts__recipient=request.user))\n )\n thread_data = serializers.MessageThreadListSerializer(threads).data\n #user = userauth_models.User.objects.filter(username=request.user.username)\n #print(user.username)\n #print(get_channel_layer())\n #print(request.session['channel_name'])\n return JsonResponse({'threads':thread_data})", "def chatlist(request):\n\n chats = get_chat_list()\n chat_list = pagination(request, chats, CHATS_PER_PAGE)\n\n dic = {'chatlist': chat_list}\n return render_to_response('whatsapp/chatlist.html', dic, context_instance=RequestContext(request))", "async def status(self):\n cmd = subprocess.check_output([\"birdc\", \"show\", \"proto\"])\n for page in chat_formatting.pagify(cmd.decode(), ['\\n', ' '], shorten_by=12):\n await self.bot.say(chat_formatting.box(page))", "def ajax_status(request):\r\n if not request.user.is_authenticated():\r\n raise PermissionDenied\r\n\r\n\r\n qs = UserPreference.objects.filter(\r\n user=request.user,\r\n key=NOTIFICATION_PREF_KEY\r\n )\r\n\r\n return HttpResponse(json.dumps({\"status\":len(qs)}), content_type=\"application/json\")", "def test_get_project_chat_messages_passes(self):\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body, {\"chat\": [], \"pagination\": None})\n # to do: add comments and test again", "def list(self, request, *args, **kwargs):\n return super(PublicChatViewSet, self).list(request, *args, **kwargs)", "def chat(request):\n team = Hunt.objects.get(is_current_hunt=True).team_from_user(request.user)\n if request.method == 'POST':\n # There is data in the post request, but we don't need anything but\n # the message because normal users can't send as staff or other teams\n m = Message.objects.create(time=timezone.now(), text=request.POST.get('message'),\n is_response=False, team=team)\n team.num_waiting_messages = 0\n messages = [m]\n else:\n if(team is None):\n return render(request, 'access_error.html', {'reason': \"team\"})\n if(team.hunt.is_locked and not team.is_playtester_team):\n return render(request, 'access_error.html', {'reason': \"hunt\"})\n if request.is_ajax():\n messages = Message.objects.filter(pk__gt=request.GET.get(\"last_pk\"))\n else:\n messages = Message.objects\n messages = messages.filter(team=team).order_by('time')\n\n # The whole message_dict format is for ajax/template uniformity\n rendered_messages = render_to_string('chat_messages.html',\n {'messages': messages, 'team_name': team.team_name})\n message_dict = {team.team_name: {'pk': team.pk, 'messages': rendered_messages}}\n try:\n last_pk = Message.objects.latest('id').id\n 
except Message.DoesNotExist:\n last_pk = 0\n team.num_waiting_messages = 0\n\n team.save() # Save last_*_message vars\n context = {'message_dict': message_dict, 'last_pk': last_pk}\n if request.is_ajax() or request.method == 'POST':\n return HttpResponse(json.dumps(context))\n else:\n context['team'] = team\n return render(request, 'chat.html', context)", "def get_status_messages(self):\n return self.data[\"allMessagesForFrontend\"][\"messages\"]", "def get_messages(self):\n other_user_email = request.args.get('other_user_email')\n page = request.args.get('page')\n per_page = request.args.get('per_page')\n if not other_user_email or not page or not per_page:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"query params\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"query params\", 400\n email_token = auth.current_user()[0]\n page = int(page)\n per_page = int(per_page)\n # App sends starting with 1 but we start at 0\n page -= 1\n try:\n message_list, pages = self.friend_database.get_conversation(email_token, other_user_email, per_page, page)\n except NoMoreMessagesError:\n self.logger.debug(messages.NO_MORE_PAGES_ERROR)\n return messages.NO_MORE_PAGES_ERROR, 404\n message_list = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in message_list]\n for i in range(len(message_list)):\n message_list[i][\"timestamp\"] = message_list[i][\"timestamp\"].isoformat()\n return json.dumps({\"messages\": message_list, \"pages\": pages}), 200", "def list_messages(chat_id):\n response = jsonify({\"messages\": list_messages_for_chat(chat_id)})\n return response", "def get_unread_messages():\n mark_seen = request.args.get('mark_seen', True)\n unread_msg = g.driver.get_unread()\n\n if mark_seen:\n for msg in unread_msg:\n msg.chat.send_seen()\n\n return jsonify(unread_msg)", "def single_chat(request, key):\n \n if not Messages.objects.using('msgstore').filter(key_remote_jid=key).count() > 0:\n return render_to_response('whatsapp/errors/404.html', context_instance=RequestContext(request)) \n\n msgs = get_chat_messages(key)\n msgs_list = pagination(request, msgs, MESSAGES_PER_PAGE)\n\n dic = {\n 'peer': key,\n 'chatmessages': msgs_list,\n 'gps': Messages.objects.using('msgstore').exclude((Q(longitude='0.0') | Q(latitude='0.0'))),\n 'media': Messages.objects.using('msgstore').exclude(media_url__isnull=True),\n 'PAG_TITLE': 'Conversation'\n }\n\n return render_to_response('whatsapp/chat.html', dic, context_instance=RequestContext(request))", "def status_api(request):\n if request.method == 'GET':\n return JsonResponse({\n 'status': 'OK',\n 'version': __version__\n }, status=200)", "def list(self, request, *args, **kwargs):\n return super(ConversationViewSet, self).list(request, *args, **kwargs)", "def msgStatus():\n return jsonify({\"status\": \"OK\"})", "def get_messages_json(self, limit=10):\n params = self.params\n params[\"limit\"] = limit\n response = requests.get(self.url + \"conversations.history\", params=params)\n return response.json()[\"messages\"]", "def get_status(self):\n r = requests.get(self.base_url + '/status')\n return r.json()", "def list(request, format=None):\r\n user_messages = request.user.profile.recent_messages()\r\n if format and format == '.json':\r\n data = {\r\n 'messages': [msg.to_json() for msg in user_messages],\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')\r\n d = {\r\n 'objects': user_messages,\r\n 'title': 'Messages',\r\n }\r\n return render(request, 'usermessages/list.html', d)", "def 
retrieve_conversation_history(username: Text) -> Text:\n history = inmemory_storage[username]\n if history:\n return jsonify(history)\n else:\n return jsonify(history), 404", "def GET(self, request):\n timer = Timer()\n try:\n myaccount = get_account_info_for_current_user()\n\n resp_json = myaccount.json_data(\n use_abbr_week_month_day_format=True)\n log_success_response(logger, timer)\n return HttpResponse(json.dumps(resp_json))\n except Exception:\n return handle_exception(logger, timer, traceback)", "def notification_list(request):\n try:\n validator = NotificationListValidator(request.GET)\n valid = validator.validate() # Validate the request\n if valid:\n current_user_id = request.user_id\n page_limit = int(request.GET['page_limit'])\n page_offset = int(request.GET['page_offset'])\n\n # notification listing\n notification_list = Notification.objects.filter(user_id=current_user_id).all().order_by('-created_on')[page_offset:page_limit+page_offset]\n serializer = NoitifcationListSerializer(notification_list, many=True)\n\n # set is_read = 1\n Notification.objects.filter(user_id=current_user_id).update(\n is_read=1\n )\n \n return Response({'data':serializer.data}, status=status.HTTP_200_OK)\n else:\n return Response({'error':requestErrorMessagesFormate(validator.get_message())}, status=status.HTTP_200_OK)\n except Exception as exception:\n logerror('notifications/views.py/notification_list', str(exception))\n return Response({'error':str(exception)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def get(self):\n self.write(\n '{\"error\": \"cryptochat-server main page, '\n 'please refer to /api/message/new or /api/message/updates\"}')", "def get_messages(request, uuid):\n if request.is_ajax():\n user = request.user\n room = Room.objects.get(pk=uuid)\n if room.members.filter(pk=user.pk).exists():\n messages_qs = room.message_set.all()\n page = request.GET.get('page')\n paginator = Paginator(messages_qs, 20)\n try:\n selected = paginator.page(page)\n except PageNotAnInteger:\n selected = paginator.page(1)\n except EmptyPage:\n selected = []\n messages = []\n for message in selected:\n data = {\n 'sender': {\n 'name': str(message.sender),\n 'id': message.sender.pk\n },\n 'message': message.text,\n 'received_room_id': uuid,\n 'date_created': message.date_created.strftime(\"%d %b %Y %H:%M:%S %Z\")\n }\n messages.append(data)\n return JsonResponse(messages, safe=False)\n\n else:\n return Http404(_(\"Sorry! We can't find what you're looking for.\"))\n else:\n return Http404(_(\"Sorry! 
We can't find what you're looking for.\"))", "def get_languages():\n\n api = (api_name, 'languages')\n\n response = make_request(api=api, action='get', **{})\n status_code = response.status_code\n content = response.text\n\n msg = str(status_code) + ' : ' + content\n \n logger.debug(\"response from spanglish languages: {}\".format(response))\n logger.debug(\"response statuscode from spanglish languages: {}\".format(status_code))\n\n click.echo(\"response message: %s \" % msg)", "def count(request):\r\n n = request.user.profile.unread_message_count()\r\n data = {\r\n 'count': n,\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')", "def show_status():\n return jsonify({\"status\": \"OK\"})", "def list_messages(self):", "def get_messages(chat_id):\n\n mark_seen = request.args.get('mark_seen', True)\n\n chat = g.driver.get_chat_from_id(chat_id)\n msgs = list(g.driver.get_all_messages_in_chat(chat))\n\n for msg in msgs:\n print(msg.id)\n\n if mark_seen:\n for msg in msgs:\n try:\n msg.chat.send_seen()\n except:\n pass\n \n return jsonify(msgs)", "def getPanchayats(request):\n if request.method == 'GET':\n inName=request.GET.get('panchayat', '')\n ptid=request.GET.get('ptid', '')\n blockName=request.GET.get('block', '')\n bid = request.GET.get('bid', '')\n districtName = request.GET.get('district', '')\n stateName = request.GET.get('state', '')\n limit=request.GET.get('limit', '')\n if limit == '':\n limit=50\n else:\n limit=int(limit)\n if ptid == '':\n if bid != '':\n panchayats=Panchayat.objects.filter(name__icontains=inName, block__id=bid)\n else:\n panchayats=Panchayat.objects.filter(name__icontains=inName, block__name__icontains=blockName, block__id__icontains=bid, block__district__name__icontains = districtName, block__district__state__name__icontains = stateName)\n else:\n panchayats=Panchayat.objects.filter(id = ptid)\n \n\n # if inCode != '':\n # panchayats=\n panchayats = panchayats[:limit]\n serializer = PanchayatSerializer(panchayats, many=True)\n return JsonResponse(serializer.data, safe=False)", "def polls_list(request):\n\n polls = Poll.objects.all()[:MAX_OBJECTS]\n data = {\n 'result': list(polls.values('question', 'created_by__username', 'pub_date'))\n }\n return JsonResponse(data)", "def my_channels(_response=Response, _db=Depends(get_db), Authorization=Header(None)):\n\n stat, auth_data = verification_details(Authorization)\n\n if stat != 200:\n _response.status_code = 500\n return {\"data\": \"something happened\"}\n\n res_status, _data = ChatController(_db).get_my_channels(\n auth_data[\"data\"][\"user\"][\"username\"]\n )\n\n _response.status_code = res_status\n\n return {\"data\": _data}", "def get(self) -> List[Conversation]:\n return get_all_conversations(), 200", "def get_status(request):\n if \"liveness\" in request.query_params:\n return Response({\"alive\": True})\n\n app_status = ApplicationStatus()\n celery_param = request.query_params.get(\"celery\", \"false\").lower()\n if celery_param == \"true\":\n return Response(app_status.celery_task_status)\n\n response = {\n \"api_version\": app_status.api_version,\n \"celery_status\": app_status.celery_status,\n \"commit\": app_status.commit,\n \"current_datetime\": app_status.current_datetime,\n \"database_status\": app_status.database_status,\n \"debug\": app_status.debug,\n \"modules\": app_status.modules,\n \"platform_info\": app_status.platform_info,\n \"python_version\": app_status.python_version,\n }\n return Response(response)", "def irrigation_status():\n try:\n response_status = 
garden_controller.branch_status()\n\n arr = form_responce_for_branches(response_status)\n send_branch_status_message('branch_status', arr)\n return jsonify(branches=arr)\n except Exception as e:\n logging.error(e)\n logging.error(\"Can't get Raspberri Pi pin status. Exception occured\")\n abort(500)", "def get(self, request):\n activities = (\n activitystreams.streams[\"local\"]\n .get_activity_stream(request.user)\n .filter(\n Q(comment__isnull=False)\n | Q(review__isnull=False)\n | Q(quotation__isnull=False)\n | Q(mention_books__isnull=False)\n )\n )\n\n large_activities = Paginator(\n activities.filter(mention_books__isnull=True)\n .exclude(content=None, quotation__quote=None)\n .exclude(content=\"\"),\n 6,\n )\n small_activities = Paginator(\n activities.filter(\n Q(mention_books__isnull=False) | Q(content=None) | Q(content=\"\")\n ),\n 4,\n )\n\n page = request.GET.get(\"page\")\n data = {\n \"large_activities\": large_activities.get_page(page),\n \"small_activities\": small_activities.get_page(page),\n }\n return TemplateResponse(request, \"discover/discover.html\", data)", "def home_view(request):\n status_list = Status.objects.filter(reply_to=None).order_by('-created')\n paginator = Paginator(status_list, 20)\n\n page = request.GET.get('page')\n try:\n statuses = paginator.page(page)\n except PageNotAnInteger:\n statuses = paginator.page(1)\n except EmptyPage:\n statuses = paginator.page(paginator.num_pages)\n\n return render(request, 'status/index.html', {\n 'statuses': statuses,\n })", "def conference_status(request, call_sid):\n resp = VoiceResponse()\n call_status = None\n if request.method == 'POST':\n call_status = request.POST.get('CallStatus', None)\n if request.method == 'GET':\n call_status = request.GET.get('CallStatus', None)\n if call_status == 'completed':\n client.calls.hangup(call_sid)\n if call_status == 'no-answer' or call_status == 'busy' or call_status == 'failed':\n call = client.calls(call_sid).update(\n method=\"POST\",\n url=BASE_URL + \"/NoAnswer?myCallSid=\"+call_sid\n )\n print(call.to)\n return HttpResponse(str(resp))", "def get(self, request, format=None):\n mess = Booking.objects.filter(renter=request.user).order_by('-pk')\n if mess:\n serializer = self.serializer_class(mess, many=True)\n return Response(serializer.data, status=200)\n return Response([], status=200)", "def chat(request):\n return jingo.render(request, 'chat/chat.html')", "def display(request):\n data = {}\n about = About.objects.all()\n if about:\n data['success']=1\n data['message']=\"Comments available\"\n data['about']=[]\n for i in range(len(about)):\n data['about'].append(\n {'about':about[i].about,\n 'about_id':about[i].id,\n })\n return JsonResponse(data)\n else:\n data['success']=0\n data['message']='no about available'\n return JsonResponse(data)", "def get_localizations(request):\n return JsonResponse(get_all_objects(Localization, LocalizationSerializer), safe=False)", "def load_messages():\n # rows = db(db.bds.author == auth.user_id).select()\n rows = db(db.contacts).select()\n d = {r.contact_id: {'name': r.name}\n for r in rows}\n return response.json(dict(board_dict=d))", "def get(self, request, format = None):\n goalProgress = GoalProgress.objects.all()\n serializer = GoalProgressSerializer(goalProgress, many=True)\n return Response(serializer.data)", "def index(request):\n messages = SESSION.get_messages_sent_list(request.session)\n feedbacks = SESSION.get_messages_received_list(request.session)\n \n # initially display the first 20 messages/feedback chronologically\n 
messages.sort(key=lambda r: r.createdAt, reverse=True)\n feedbacks.sort(key=lambda r: r.createdAt, reverse=True)\n \n # prepare our template context variables use in our messages template\n data = {\n 'messages_nav': True,\n 'messages': messages[:PAGINATION_THRESHOLD],\n 'feedback': feedbacks[:PAGINATION_THRESHOLD],\n 'pag_threshold': PAGINATION_THRESHOLD,\n 'pag_page': 1,\n 'sent_count': len(messages),\n 'feedback_count': len(feedbacks),\n 'tab_feedback': request.GET.get('tab_feedback'),\n }\n \n if SESSION.get_patronStore_count(request.session):\n data['has_patrons'] = True\n \n # inserting this success and error message into the template\n # should be done in a cleaner way - this was done by the first guy\n # I just didn't bother changing it.\n if request.GET.get(\"success\"):\n data['success'] = request.GET.get(\"success\")\n if request.GET.get(\"error\"):\n data['error'] = request.GET.get(\"error\")\n \n return render(request, 'manage/messages.djhtml', data)", "def display_messenger_status(self):\n caller = self.caller\n unread = caller.messages.pending_messengers\n read = caller.messages.messenger_history\n if not (read or unread):\n caller.msg(\n \"You have no messengers waiting for you, and have never received any messengers.\"\n + \" {wEver{n. At all. Not {rone{n.\"\n )\n if read:\n caller.msg(\"You have {w%s{n old messages you can re-read.\" % len(read))\n if unread:\n caller.msg(\n \"{mYou have {w%s{m new messengers waiting to be received.\" % len(unread)\n )", "def get(self):\n all_questions = get_all_questions()\n if all_questions:\n return jsonify({\"message\": \"All questions viewed successfully\", \"questions\": all_questions}), 200\n return jsonify({\"message\": \"No questions posted yet\"}), 404", "def list(self, request, graph_type=None):\n user = {}\n if request.authenticated_userid:\n account = Account.one(request, request.authenticated_userid)\n user['account_id'] = account.id\n if account.check_admin(request, user):\n cleaned_data = {}\n raw_data = NLTKOutput.all(request)\n for record in raw_data:\n if record.account_id in cleaned_data:\n cleaned_data[record.account_id].append(record.nltk_result)\n else:\n cleaned_data[record.account_id] = [record.nltk_result]\n if graph_type == 'stacked_bar':\n return_obj = stacked_bar_for_all(cleaned_data)\n if graph_type == 'pie':\n return_obj = pie_for_all(cleaned_data)\n if graph_type == 'compound_bar':\n return_obj = compound_for_all(cleaned_data) \n return Response(return_obj.encode(), status=200)", "def index(request):\n # Get a list of rooms, ordered alphabetically\n import json\n rooms = Room.objects.order_by(\"title\")\n roomslist = []\n chat_participants = []\n participant_count=1\n user = User.objects.get(username=request.user.username)\n #print(user)\n tuj_list =TaskUserJunction.objects.filter(worker_id = user)\n #role = request.user.profile.role\n role=request.session['role']\n #print(role)\n participants = Profile.objects.filter(user_id=request.user.profile.user_id)\n mentee_task_list = {};\n workername = None\n #mentor_boolean = 'false';\n for p in participants:\n # if(p.is_Mentor):\n # mentor_boolean = 'true';\n # else:\n # mentor_boolean = 'false';\n if(role == 'mentor'):\n try:\n mentee_id = p.get_mentees();\n for m in mentee_id:\n user = User.objects.get(id=m);\n participant_count=1\n workername=user.username\n tuj_list = TaskUserJunction.objects.filter(worker_id = user)\n chat_participants.append(user.username);\n task_list = [];\n for tuj in tuj_list:\n task_list.append((str(tuj),tuj.room_id));\n 
mentee_task_list[user.username] = task_list;\n except:\n mentee_task_list={}\n else:\n #Get Mentors list\n try:\n workername=p.user\n mentor_id = p.get_mentors()[0];\n participant_count=len(p.get_mentors())\n if(participant_count==1):\n user = User.objects.get(id=mentor_id);\n chat_participants.append(user.username);\n else:\n chat_participants.append(\"Sam\")\n except:\n tuj_list=tuj_list\n \n # Render that in the index template\n return render(request, \"messages.html\", {\n \"tuj_list\": tuj_list,\n \"chat_participants\" : chat_participants,\n \"participant_count\" : participant_count,\n \"mentee_task_list\" : mentee_task_list,\n \"workername\" : workername,\n \"role\" : role,\n \"curr\" : user\n })", "def get_status():\n data = {\n 'status': 'up',\n }\n jsn = json.dumps(data)\n\n resp = Response(jsn, status=200, mimetype='application/json')\n\n return resp", "def __chat_id_response(self) -> int:\n try:\n fetch_updates = self.__get_updates()\n return fetch_updates[0]['message']['chat']['id']\n except TimeoutError as tm_err:\n print(tm_err)\n sys.exit(1)", "def status():\n statuses = get_all_statuses()\n return json.dumps(statuses, indent=4)", "async def statusinfo(self, astable):\n cmd = subprocess.check_output([\"birdc\", \"show\", \"proto\", \"all\", str(astable)])\n for page in chat_formatting.pagify(cmd.decode(), ['\\n', ' '], shorten_by=12):\n await self.bot.say(chat_formatting.box(page))", "def getinformation(request):\n all_information_objects = information.objects.all()\n serializer = information_serializer(all_information_objects, many=True)\n return JsonResponse ({'message':serializer.data}, status=status.HTTP_200_OK)", "def status(self):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_STATUS]))\n return r.json()", "def home(request):\n response_status = {'text': \"API is up\"}\n return JsonResponse(response_status, status=200)", "def get_messages(request):\n import urllib.parse\n if request.user.is_authenticated():\n msgs = BroadcastMessage.objects.current().for_auth_users()\n else:\n msgs = BroadcastMessage.objects.current().for_unauth_users()\n\n # exclude by those seen\n excluded_session = decode_excluded(request.session.get(\"excluded_broadcasts\", \"\"))\n excluded_cookie = decode_excluded(request.COOKIES.get(\"excluded_broadcasts\", \"\"))\n excluded = excluded_session | excluded_cookie\n msgs = msgs.exclude(pk__in=list(excluded))\n\n # filter them by the HTTP_REFERER\n url_parts = urllib.parse.urlparse(request.META.get('HTTP_REFERER', '/'))\n path = url_parts.path\n valid_messages = [msg for msg in msgs if re.match(msg.url_target, path)]\n msg_list = []\n for msg in valid_messages:\n msg_list.append(msg.msg_info())\n if msg.show_frequency == BroadcastMessage.SHOW_ONCE:\n excluded_cookie.add(msg.pk)\n elif msg.show_frequency == BroadcastMessage.SHOW_ONCE_SESSION:\n excluded_session.add(msg.pk)\n request.session['excluded_broadcasts'] = encode_excluded(excluded_session)\n response = HttpResponse(json.dumps(msg_list),\n content_type=\"application/json\")\n response.set_cookie('excluded_broadcasts', encode_excluded(excluded_cookie))\n return response", "def health_check(request):\n response = {\"Status\": True}\n return JsonResponse(response, safe=False)", "def chat(self):\n return self._get(\"chat\")", "def messages_gps(request):\n\n msgs = get_messages_gps()\n msgs_list = pagination(request, msgs, MESSAGES_PER_PAGE)\n dic = {\n 'chatmessages': msgs_list,\n 'PAG_TITLE': 'Messages with GPS Data'\n }\n\n return render_to_response('whatsapp/chat.html', dic, 
context_instance=RequestContext(request))", "def get_status_messages(self):\n\n try:\n subContext = conf.EHST_MESSAGES\n connHandler = self._tap._TapPlus__getconnhandler()\n response = connHandler.execute_tapget(subContext, verbose=False)\n if response.status == 200:\n for line in response:\n string_message = line.decode(\"utf-8\")\n print(string_message[string_message.index('=') + 1:])\n except OSError:\n print(\"Status messages could not be retrieved\")", "def get(self, request):\n followers = request.GET.get('followers')\n followings = request.GET.get('followings')\n\n if followers:\n follower_obj = UserFollower.objects.filter(user=request.user.id).first()\n\n follower_serialized = UserFollowerShowSerializer(follower_obj)\n\n response_json = {\n 'status': True,\n 'message': 'successful',\n 'data': follower_serialized.data\n }\n\n return Response(response_json, status=200)\n\n if followings:\n following_obj = UserFollowing.objects.filter(user=request.user.id).first()\n\n following_serialized = UserFollowingShowSerializer(following_obj)\n\n response_json = {\n 'status': True,\n 'message': 'successful',\n 'data': following_serialized.data\n }\n\n return Response(response_json, status=200)\n\n response_json = {\n 'status': False,\n 'message': 'unsuccessful',\n 'data': {}\n }\n\n return Response(response_json, status=200)", "def index(self):\n try:\n query = h.eagerload_morpheme_language_model(Session.query(MorphemeLanguageModel))\n query = h.add_order_by(query, dict(request.GET), self.query_builder)\n return h.add_pagination(query, dict(request.GET))\n except Invalid, e:\n response.status_int = 400\n return {'errors': e.unpack_errors()}", "def list(self, request):\n # Get transaction\n transaction_id = self.request.query_params.get('transaction_id')\n transaction = Transaction.objects.get(id=transaction_id)\n\n return JsonResponse({\"status\": transaction.status, \"transaction_error\": transaction.error_type})", "def handle_state(token: str = \"\"):\n helper = SlackHelper(token)\n\n response = paginated_api_call(\n api_method=helper.client.conversations_list,\n response_objects_name=\"channels\",\n exclude_archived=0,\n types=\"public_channel, private_channel\",\n )\n return {\"channels\": response}", "def message_list(request, sender=None, receiver=None):\n if request.method == 'GET':\n messages = Message.objects.filter(sender_id=sender, receiver_id=receiver, is_read=False)\n serializer = MessageSerializer(messages, many=True, context={'request': request})\n for message in messages:\n message.is_read = True\n message.save()\n return JsonResponse(serializer.data, safe=False)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n # print(data)\n serializer = MessageSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)", "def chat():\n kwargs = {\"title\": u\"chat channel\", \"entries\": log.getLogEntries()}\n return render_template(\"chat.html\", **kwargs)", "def test_chat_page(self):\n c_code = 'new-chat'\n rsp = self.client.get('/{}'.format(c_code))\n html = rsp.data\n\n self.assertEqual(200, rsp.status_code)\n # HTML is returned.\n self.assertIn('<html>', html)\n # HTML for chat is retrieved.\n self.assertIn(c_code, html)", "def index(self, req):\n context = req.environ['manila.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to message attrs\n search_opts.pop('limit', None)\n search_opts.pop('marker', 
None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n messages = self.message_api.get_all(\n context, search_opts=search_opts, sort_dir=sort_dir,\n sort_key=sort_key)\n limited_list = common.limited(messages, req)\n\n return self._view_builder.index(req, limited_list)", "def getPanchayatsAccurateData(request):\n if request.method == 'GET':\n panchayat=request.GET.get('panchayat', '')\n ptid=request.GET.get('ptid', '')\n block = request.GET.get('block', '')\n bid = request.GET.get('bid', '')\n district = request.GET.get('district', '')\n state = request.GET.get('state', '')\n limit=request.GET.get('limit', '')\n if limit == '':\n limit=500\n else:\n limit=int(limit)\n #PTID we need to make it exact. Finyaer make it current finyear\n panchayats = PanchayatStat.objects.filter(panchayat__name__icontains = panchayat, panchayat__id__icontains = ptid, panchayat__block__id__icontains = bid, panchayat__block__name__icontains=block, panchayat__block__district__name__icontains = district, panchayat__block__district__state__name__icontains = state, workDaysAccuracyIndex__gte = 90, finyear = '17')\n\n\n panchayats = panchayats[:limit]\n serializer = PanchayatStatSerializer(panchayats, many=True)\n return JsonResponse(serializer.data, safe=False)", "def blocked(self, request, *args, **kwargs):\n conversation = self.get_object()\n blocked = User.objects.filter(id__in=conversation.blocked)\n self.pagination_class = ShoutitPageNumberPagination\n page = self.paginate_queryset(blocked)\n serializer = ProfileSerializer(page, many=True, context={'request': request})\n return self.get_paginated_response(serializer.data)", "def get_response(self):\n return self.messages", "def msgs(request):\r\n results = list(PFASMsgRecord.objects.values())\r\n for result in results:\r\n # add a field that's not in the DB\r\n result['valid'] = 'literally' not in result['message'].lower()\r\n # change to text so JSON can save it\r\n result['timestamp'] = str(result['timestamp'])\r\n\r\n return HttpResponse(\r\n json.dumps(results, indent=2, sort_keys=True),\r\n content_type='application/json',\r\n )", "def get_homework_statuses(current_timestamp: int) -> dict:\n data = {\"from_date\": current_timestamp}\n headers = {\"Authorization\": f\"OAuth {PRAKTIKUM_TOKEN}\"}\n try:\n homework_statuses = requests.get(\n PRAKTIKUM_API_URL,\n params=data,\n headers=headers,\n )\n # Заккоментриовал, т.к. 
валятся тесты\n # homework_statuses.raise_for_status()\n except requests.exceptions.RequestException as e:\n logger.exception(e)\n send_message(str(e))\n raise\n return homework_statuses.json()", "def get_all(request):\n\treturn JsonResponse(todo)", "def polls_details(request, pk):\n poll = get_object_or_404(Poll, pk=pk)\n data = {\n 'result': {\n 'question': poll.question,\n 'created_by': poll.created_by.username,\n 'pub_date': poll.pub_date\n }\n }\n return JsonResponse(data)", "def get(self, request):\n return Response(\"Dashboard Listing Page\", status=status.HTTP_200_OK)", "def read_home():\n return {'message': 'API live!'}", "def get_languages(request):\n if request.is_ajax():\n publications = Publication.objects.all()\n language_list = []\n for pub in publications:\n languages = pub.languages.all()\n for lang in languages:\n language_list.append({\"pub\": pub.pk,\"pub_name\":pub.name,\"lang\": u\"%s\" % lang.pk,\"name_lang\":lang.alias})\n data = simplejson.dumps(language_list)\n return HttpResponse(data)", "def librarian_list(request):\n librarian = User.objects.filter(is_student=False, is_lecturer=False, is_parent=False, is_superuser=False)\n user_type = \"Librarian\"\n context = {\n \"librarian\": librarian,\n \"user_type\": user_type,\n }\n return render(request, 'library/librarians_list.html', context)", "def check_status(bot, chat_id, query):\n\n remain = remain_time(chat_id)\n query_id = query.id\n\n message = bot_collection[chat_id].get_remained_message(remain)\n\n bot.answer_callback_query(callback_query_id=query_id, text=message)\n\n pass", "def test_get_unread_status_count(self):\n request = self.factory.get(\"\")\n request.user = self.local_user\n\n with patch(\"bookwyrm.activitystreams.ActivityStream.get_unread_count\") as mock:\n mock.return_value = 3\n result = views.get_unread_status_count(request, \"home\")\n\n self.assertIsInstance(result, JsonResponse)\n data = json.loads(result.getvalue())\n self.assertEqual(data[\"count\"], 3)", "def index():\n #Have to create LOGIN\n #serverxmpp.init()\n return dict()", "def chat(self) -> \"api.Chat\":\n raise NotImplementedError", "def get(self):\n\n return {\"message\": \"Welcome to the news API. 
\"}", "def show_all(message, bot):\n reports_status = str(message.text)\n try:\n reports = requests.get(\n url=f'http://127.0.0.1:8000/api/v1/report/?status={reports_status}',\n headers={\n 'Authorization': f'Bearer {jwt_token}',\n }\n )\n if reports.status_code == 200:\n result = reports.json()\n output = format_result(result)\n bot.send_message(\n message.from_user.id,\n output,\n parse_mode='HTML',\n )\n elif reports.status_code == 401:\n bot.send_message(\n message.from_user.id,\n 'Ошибка авторизации, проверьте токен',\n )\n except requests.exceptions.ConnectionError:\n bot.send_message(\n message.from_user.id,\n 'Ошибка соединения с сервером',\n )", "def get(self, request):\n announcement_id = request.GET.get(\"id\")\n if announcement_id:\n try:\n announcement = Announcement.objects.get(id=announcement_id)\n return self.success(AnnouncementSerializer(announcement).data)\n except Announcement.DoesNotExist:\n return self.error(\"Announcement does not exist\")\n announcement = Announcement.objects.all().order_by(\"-create_time\")\n if request.GET.get(\"visible\") == \"true\":\n announcement = announcement.filter(visible=True)\n return self.success(self.paginate_data(request, announcement, AnnouncementSerializer))", "def outgoing_messages_view(request):\n try:\n data = JSONParser().parse(request)\n user_id = data['sender_id'].encode('utf-8')\n messages_list = Message.objects.filter(sender_id=user_id)\n serializer = MessageSerializer(messages_list, many=True)\n except Exception as e:\n return Response({})\n return Response(serializer.data)", "def annotation_all_stats(request):\n\n id_report = request.GET.get('report',None)\n language = request.GET.get('language',None)\n\n json_dict = get_annotations_count(id_report,language)\n\n # print('annotations',json_dict)\n return JsonResponse(json_dict)", "def render_GET(self, request):\n request.setHeader(b'content-type', b'application/json; charset=utf-8')\n set_cors(request, 'GET')\n\n timestamp = None\n if b'timestamp' in request.args:\n try:\n timestamp = int(request.args[b'timestamp'][0])\n except ValueError:\n return json.dumps({\n 'success': False,\n 'message': 'Invalid timestamp parameter, expecting an integer'\n }).encode('utf-8')\n\n tx_tips = self.manager.tx_storage.get_tx_tips(timestamp)\n ret = {'success': True, 'tips': [tip.data.hex() for tip in tx_tips]}\n return json.dumps(ret).encode('utf-8')", "def index():\n text_list = ['노인복지']\n fulfillment = {'text':''}\n dialogflow_response = detect_intent_texts('', '', text_list, 'ko')\n fulfillment['text'] = dialogflow_response\n json_fulfillment = json.dumps(fulfillment, ensure_ascii=False)\n\n return jsonify(fulfillment)", "async def get(self):\n await self.handle_request(self.chats_api, 1)", "def view_meetups():\n\n return make_response(jsonify({\n \"status\": 200,\n \"data\": meetups.view_meetups()\n })), 200", "def queue_status(request):\n\n xml = cache.get(settings.CHAT_CACHE_KEY)\n status = 200\n if not xml:\n xml = ''\n status = 503\n return HttpResponse(xml, mimetype='application/xml', status=status)", "def view_status(request, pk):\n\n status = Status.objects.get(pk=pk)\n user_profile = get_users_profile(request.user.id)\n\n context = {\n 'news_feed': [status],\n 'user_profile': user_profile,\n }\n\n return render(request, 'status/view_status.html', context)", "def get_report_translations(request):\n\n id_report = request.GET.get('id_report',None)\n if id_report is not None:\n languages = []\n lang = Report.objects.filter(id_report = id_report)\n for el in lang:\n if el.language 
not in languages:\n languages.append(el.language)\n\n json_resp = {}\n # print(languages)\n json_resp['languages'] = languages\n return JsonResponse(json_resp)", "def GET(self):\n web.header(\"Content-Type\",\"application/json; charset=utf-8\")\n data = web.input(module=\"module\")\n module = data[\"module\"]\n count = db_module.get_module_newsNum(module)\n result = json.dumps({\"count\": count})\n return result" ]
[ "0.7056218", "0.58047336", "0.57761455", "0.56156147", "0.56094897", "0.5595043", "0.55575705", "0.5555067", "0.5535522", "0.553365", "0.5530453", "0.55122775", "0.540345", "0.53645366", "0.53573716", "0.53215814", "0.53145057", "0.52810264", "0.5262246", "0.5239236", "0.52295315", "0.5229372", "0.52216506", "0.5219727", "0.5216611", "0.52143365", "0.5213971", "0.5187544", "0.51789004", "0.5177824", "0.5176105", "0.5157143", "0.5154037", "0.51500577", "0.51406693", "0.51380485", "0.5134545", "0.5114321", "0.5094981", "0.50934637", "0.5089629", "0.50571316", "0.50527954", "0.5043728", "0.50434357", "0.50343096", "0.5032079", "0.5032062", "0.5026707", "0.50208706", "0.50157106", "0.50086814", "0.5007704", "0.5002635", "0.49898568", "0.4988651", "0.49841982", "0.4980139", "0.49798295", "0.49785084", "0.4971758", "0.495879", "0.49498272", "0.49442643", "0.49430567", "0.49418598", "0.49394518", "0.49368566", "0.49331415", "0.49319774", "0.49296612", "0.4929069", "0.4924208", "0.49227092", "0.49137104", "0.49105138", "0.49091852", "0.48928583", "0.4878677", "0.4870548", "0.4863661", "0.48606655", "0.485837", "0.4852743", "0.48527423", "0.48517093", "0.48449242", "0.48353043", "0.48187932", "0.48186865", "0.4817548", "0.48160022", "0.48152146", "0.4813951", "0.4807799", "0.4806316", "0.48054883", "0.48015216", "0.47994596", "0.47973204" ]
0.76308614
0
Function to place a disk of color 'col' at the coordinate (xa,ya)
def create_point(xa,ya,col): disque = canvas.create_oval(xa-(rayon),ya-(rayon),xa+(rayon),ya+(rayon),fill="white",outline=col) return disque
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_xy_position(row, col):\n spacing_x = 86 + 11\n spacing_y = 98 + 8\n top_y = 50\n left_x = 50\n return left_x + col * spacing_x, top_y + row * spacing_y", "def colWithTile(self, pos):\n\n\n return self.colWithBox(pos, [2.0,2.0,2.0])", "def em_coord_turtle(lin, col, dim, tam_celula):\n meio = dim // 2\n x = (col - meio) * tam_celula\n y = (meio - lin) * tam_celula\n return x, y", "def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)", "def collocation_points(self, **kw):\n pass", "def _get_x_y_from_pos(self, col,row): \r\n return (self.margin_left+(self.text_width*col),\r\n self.margin_top+(self.text_height*row))", "def _position_x_to_column(self, x, y):\n col = -1\n if y>self.padding_top and y<self.padding_top+self.len_y_cercles:\n for i in range(self.n_columns):\n if x>self.padding_left+i*63 and x<self.padding_left+i*63+self.diam_cercles:\n col = i+1\n break\n return col", "def coords_to_node(self,row,col):\n return row*self.cols + col + 1", "def coord (i, j):\r\n return j, i", "def rowcol2XY(row,col,CCD):\n pixscale = 0.015 #mm/pix\n X = CCD[1]+1024*pixscale-(col*pixscale+pixscale/2.)\n Y = CCD[2]+2048*pixscale-(row*pixscale+pixscale/2.)\n return X,Y", "def get_cell_coords(self, pt):\n\n\t return int(pt[0] // self.a), int(pt[1] // self.a)", "def get_coords_for_col(self, i):\n X = N.zeros((self.rows,2),dtype=config.floatX)\n X[:,0] = self.xmin + float(i) * self.delta_x\n X[:,1] = self.ymin + N.cast[config.floatX](N.asarray(range(self.rows-1,-1,-1))) * self.delta_y\n\n\n return X", "def col(self, col: tuple) -> list:\n return self.grid[col::9]", "def location_to_pos(self,row, col):\r\n\r\n pos_row = str(row + 1)\r\n pos_col = chr(col + 97)\r\n return pos_col + pos_row", "def _add_coordinate_data(self, df, geom_col):\n x = df.apply(self._get_coords,\n geom_col=geom_col,\n coord_type='x',\n axis=1)\n\n y = df.apply(self._get_coords,\n geom_col=geom_col,\n coord_type='y',\n axis=1)\n return x, y", "def xy_to_rowcol(self, x, y):\n col = int((x - self.board_lft_x) / self.next_square)\n row = int((self.board_top_y - y) / self.next_square)\n return [row, col]", "def drawcolumn(self, colnum: int, epc: str, dst: float) -> None:\n svg = self.svg\n wscr, hscr = svg.get_WH()\n w_rect = 50\n colsep = 20\n w_column = w_rect + colsep\n\n xleft = colnum * w_column\n TOP_MARGIN = 20\n MAXH = hscr - TOP_MARGIN\n MAXDIST = 5.0\n h_rect = (dst*MAXH)/MAXDIST\n print(\"CALC dist {}, maxh {}, maxdist {} h_rect {} \".format(dst, MAXH, MAXDIST, h_rect))\n if h_rect > MAXH:\n h_rect = MAXH\n h_rect = int(h_rect)\n ytop = TOP_MARGIN + (MAXH - h_rect)\n print(\"DRAW {}: {} {} {} {}\".format(colnum, xleft, ytop, w_rect, h_rect))\n red_colorstr = '#ff0066'\n blu_colorstr = '#6600ff'\n svg.rect(xleft, ytop, w_rect, h_rect, red_colorstr)\n svg.text(xleft, ytop, blu_colorstr, epc)", "def get_id_from_coor(self, x, y):\n x_coor = x // self._cell_dim\n y_coor = y // self._cell_dim\n return (x_coor, y_coor)", "def _modify_columns(self, cols, X, y=None):", "def col_for_cell(self, coords, include_self=False):\n row, col = coords\n return ((r, col) for r in self.rows if include_self or r != row)", "def set_col( self, col ):\n self.ix_col = col", "def positionColour(row, col):\n if (row + col) % 2 == 0:\n return BLACK\n else:\n return WHITE", "def draw_pixel(x, y, col):\n unicornhathd.set_pixel(x, 12 - y, col[0], col[1], col[2])", "def get_xy(self, x, y):\r\n\t\treturn self.grid[y, x]", "def 
set_collocation_points(self, X_f):\n self.t = self.tensor(X_f[:,0:1])\n self.x = self.tensor(X_f[:,1:2])\n self.y = self.tensor(X_f[:,2:3])", "def spot_coords(self,spot):\n if spot == '1':\n return (330 - 60 ,335 - 15)\n if spot == '2':\n return (419 - 60, 335 - 15)\n if spot == '3':\n return (591 - 60, 159 - 15)\n if spot == '4':\n return (588 - 60, 248 - 15)", "def drawColorColumn(x, yseq, zseq):\n dislin.curvy3(x, yseq, zseq, len(yseq))", "def seg_row_col(sp) : \n return src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)", "def coord_char(coord, matrix):\n row_index, column_index = coord\n\n return matrix[row_index][column_index]", "def coordinates(self):", "def redraw_col(cls, col):\n # TODO: It's wasteful to make draw_block get block all over again here!\n for _, coord in MapModel.get_column(col, cls.depth):\n cls.draw_block(coord)", "def grid_to_mouse( pos ):\n ix,iy=pos\n px= ix*CELLSIZE + H_CELLSIZE + ix*CELLGAP\n py= iy*CELLSIZE + H_CELLSIZE + iy*CELLGAP\n return (px,py)", "def schcoords(self, canx, cany):\n # Coordinates of scheme (0,0) corner as canvas coords.\n # Scheme (0,0) is the left, bottom corner (like in mathematic,\n # not like in canvas)\n x0 = (self._cw - self.width)/2\n y0 = (self._ch - self.height)/2 + self.height\n return (canx - x0, y0 - cany)", "def Pos(row, col):\n return ESC + str(row) + ';' + str(col) + 'H'", "def set_2d_location(self, x, y):\r\n self.unif[42:44] = [x, y]", "def _havannah_coord_to_canvas_coord(self, coord):\n col, slant = cubic_to_axial(*coord)\n canvas_x, canvas_y = self.CANVAS_CENTER\n\n canvas_x += col * self.HEX_WIDTH // 4 * 3\n canvas_y += (col * self.HEX_WIDTH // 2) + (slant * self.HEX_WIDTH)\n\n return (canvas_x, canvas_y)", "def click(self, x, y):\n row = int((x - self.x)/self.cell_size)\n col = int((y - self.y)/self.cell_size)\n if 0 <= row < ROWS and 0 <= col < COLS:\n return row, col\n return None", "def node_to_coords(self,node_num):\n row = (node_num - 1) / self.cols\n col = (node_num - 1) % self.cols\n return (row,col)", "def to_coords(self, px, py):\n if px not in range(self.SIZE**2) or py not in range(self.SIZE**2):\n raise IndexError\n return (px // self.SIZE, py // self.SIZE,\n px % self.SIZE, py % self.SIZE)", "def compute_row_col(self, x, y):\n if self.is_in_small_cell_area(x, y):\n x = int(math.fabs(x - self.xoffset))\n col = x / self.small.width\n row = (self.small_yoffset - y) / self.small.height\n else:\n col = (x - self.xoffset) / self.large.width\n row = (self.large_yoffset - y) / self.large.height\n return row, col", "def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])", "def col_data_mover_at(row, col):\n if col == 0:\n return NAME_SCHEME[\"memory move\"].format(prefix=f\"l{row}\")\n else:\n return NAME_SCHEME[\"register move right\"].format(pe=f\"pe_{row}_{col - 1}\")", "def getCellpos(self, event):\n e = event.widget\n cx, cy = cart(e.canvasx(event.x), e.canvasy(event.y))\n cellx = int(cx) // self.cell_width\n celly = int(cy) // self.cell_height\n return cellx, celly", "def get_cell_coords(pt):\n\n return int(pt[0] // a), int(pt[1] // a)", "def get_pos(self):\n return [self.row, self.col]", "def map_loc_to_pixel((x, y), xc = 17.25, yc = 630, run = 17.25):\n xp, yp = xc + x*run, yc - y*run\n return (xp, yp)", "def convertFrom1Dto2D(coord, num_cols):\n y = int(np.floor(coord/num_cols))\n x = coord % num_cols\n return (y,x)", "def _get_grid_coord(self, point):\n return tuple([int(point[i] / self._cell_length) for i in range(self._dim)])", "def _goto_piece_xy(self, 
row, col, adjustment_x=0, adjustment_y=0):\n self.pen.up()\n x = (self.board_lft_x + col * (self.next_square) +\n self.square_side_size * .05) + adjustment_x * self.square_side_size\n y = (self.board_top_y - row * (self.next_square) -\n self.square_side_size * .8) - adjustment_y * self.square_side_size\n self.pen.goto(x, y)", "def cell_from_xy(self,x,y):\n return self.cell_array.item((x,y))", "def coord(self, x, y):\n origin_x = self._raster_meta['transform'][3]\n origin_y = self._raster_meta['transform'][0]\n pixel_x = self._raster_meta['transform'][5]\n pixel_y = self._raster_meta['transform'][1]\n\n x = int((x - origin_x) / pixel_x)\n y = int((y - origin_y) / pixel_y)\n return self[x, y]", "def coord(self):\n fmt = \"{min_col}{min_row}:{max_col}{max_row}\"\n if (self.min_col == self.max_col\n and self.min_row == self.max_row):\n fmt = \"{min_col}{min_row}\"\n\n return fmt.format(\n min_col=get_column_letter(self.min_col),\n min_row=self.min_row,\n max_col=get_column_letter(self.max_col),\n max_row=self.max_row\n )", "def cols(self, col):\n self.col += col", "def _get_grid_coord(wl, bl):\n row = None\n col = None\n\n for i, (l, h) in QLDbEntry.wl_map.items():\n if wl >= l and wl <= h:\n row = i\n break\n\n for i, (l, h) in QLDbEntry.bl_map.items():\n if bl >= l and bl <= h:\n col = i\n break\n\n return col, row", "def ind2coord(self, index):\n\n # assert (index >= 0)\n # assert(index < self.n - 1)\n\n col = index // self.rows\n row = index % self.rows\n\n return [row, col]", "def locate(x, y):\n position(x * 6, y)", "def get_origin(col, row, pattern_size, margin):\n\tw,h = pattern_size\n\torigin = col*(w+margin), row*(h+margin)\n\treturn origin", "def set(self, row: int, col: int, color: Color) -> None:\n super(ColorGrid, self).set(row, col, color)", "def genRowColCell(self, args):\n if len(args) == 2:\n x = int(args[0])\n y = int(args[1])\n if x == 0 and y == 0:\n return self.genMatrix()\n elif x == 0:\n return self.genRow(y - 1)\n elif y == 0:\n return self.genCol(x - 1)\n else:\n return self.genCell(x - 1, y - 1)\n else:\n return self.genMatrix()", "def xy_to_cxcy(xy):\n return torch.cat([(xy[:, 2:] + xy[:, :2]) / 2, # c_x, c_y\n xy[:, 2:] - xy[:, :2]], 1) # w, h", "def getCoordinateAt(self,colIndex,rowIndex):\n gridval = self.grid.get(self.createKey(colIndex,rowIndex),self.defaultVal)\n retvals = [self.currentCol,self.currentRow,gridval]\n return retvals", "def __init__(self, row = 0, col = 0):\n self.row = row\n self.col = col", "def get_index(self, row, col):\n return (row * self.cols) + col", "def test_board_coordinates_toXY():\r\n m = Move()\r\n for col_num, col_name in enumerate(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']):\r\n for row in range(1, 9):\r\n assert m.translate_to_xy(col_name + str(row)) == (Board.SIZE - row, col_num)", "def get_coord(self,x,y,z):\n a = 0\n b = 0\n c = 0\n \n distance = 0\n \n while (distance <= x):\n distance += SQUARE_SIZE\n if ( (x - distance) > - (SQUARE_SIZE / 2) ):\n a += 1\n distance = 0\n \n while (distance <= y):\n distance += SQUARE_SIZE\n if ( (y - distance) > - (SQUARE_SIZE / 2) ):\n b += 1\n distance = 0\n \n while (distance <= z):\n distance += SQUARE_SIZE\n if ( (z - distance) > - (SQUARE_SIZE / 2) ):\n c += 1\n distance = 0\n \n return(a,b,c)", "def chr_coords(s):\n return max_y - (max_y - min_y)*s", "def test_if_row_col_well_retrieved_from_mouse_pos(self):\n ui = UIRender(TestUI.image_path)\n row, col = ui.get_row_col_from_mouse((10,25))\n self.assertEqual(row, 0)\n self.assertEqual(col, 0)", "def mark(board, player, row, col):\r\n 
pass", "def map_grid_loc_to_pixel((grid, x, y), panel_dimensions = bm_panel_dimensions, xc = 17.25, yc = 630, run = 17.25):\n x_offset = 0\n for panel_index, panel_dim in panel_dimensions.iteritems():\n if panel_index < grid:\n width, height = panel_dim\n x_offset += width*xc\n xp, yp = xc + x*run + x_offset, yc - y*run\n return (xp, yp)", "def indexToPosition(self, col, row):\n columns = \"ABCDEFGH\"\n return columns[col] + str(row + 1)", "def get_row_col(mouse_x, mouse_y):\n # Note: the top row is row=0 (bottom row=2), left col is col=0 (right col=2)\n spacing_x = 86 + 8\n spacing_y = 98 + 5\n top_y = 50\n left_x = 50\n return (mouse_y - top_y) // spacing_y, (mouse_x - left_x) // spacing_x", "def mouse_to_grid( pos ):\n mx,my=pos\n # account for window border and gap between cells\n ix = int((mx-H_CELLSIZE)/(CELLSIZE+CELLGAP))\n iy = int((my-H_CELLSIZE)/(CELLSIZE+CELLGAP))\n # force respect window borders\n if ix<0 or ix>=GRID_X or iy<0 or iy>=GRID_Y:\n return None\n else:\n return (ix,iy)", "def cell_value(self, x, y):\n if x == 8 and y == 0:\n return \"--\"\n (r, g) = self[(x, y)]\n return \"%s%s\" % (r, g)", "def get_xy(self):\r\n return self.board.get_xy()", "def putpixel(self, col, row, color=GREEN):\n if col < 0 or row < 0:\n return\n try:\n self.vram[row][col] = color\n except IndexError:\n pass", "def pixel(self, x: int, y: int, colour: int, /) -> None:", "def cell_coord(id, Nx):\n nx = id // (Nx**2)\n ny = (id - nx * Nx**2) // Nx\n nz = id - nx * Nx**2 - ny * Nx\n return np.array([nx, ny, nz])", "def piece_at(self, row, col):\n return self.board[row + PADDING][col + PADDING]", "def to_position(self, x, y, i, j):\n return (x * self.SIZE + i, y * self.SIZE + j)", "def __convert_position(self, row_position: int = None, col_position: int = None) -> int:\n if row_position is None or col_position is None:\n return self.__row_position * len(self.__labyrinth[0]) + self.__col_position\n\n return row_position * len(self.__labyrinth[0]) + col_position", "def print_seg_row_col(sp) : \n s, r, c = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n print('seg: %d, row: %.1f, col: %.1f' % (s, r, c))", "def getCol(self, n, offset=0):\n return self._c[(n*self.__height + offset):((n+1) * self.__height)]", "def placeCrate (self, crates_char, row, column):", "def room_xy(room, x, y, value=None):\n return room[x][y]", "def _seat_id(col: int, row: int):\n return row * 8 + col", "def get_column(self, coords, direction):\n column = []\n x = coords[0]\n y = coords[1]\n if direction == \"down\":\n # x, 0-y\n for i in range(y):\n column.append((x, i))\n elif direction == \"right\":\n # 0-x, y\n for i in range(x):\n column.append((i, y))\n elif direction == \"up\":\n # x, y-last\n for i in range(y + 1, self.size):\n column.append((x, i))\n elif direction == \"left\":\n # x-last, y\n for i in range(x + 1, self.size):\n column.append((i, y))\n\n return column", "def get_pos(self, cx, cy):\n x = self.min_x + cx*(self.size+0.5)\n y = self.min_y + cy*(self.size+0.5)\n return (x,y)", "def get_coordinates():\n\tallowed_range = [0,1,2]\n\trow = int(input(\"Enter row: \")) - 1\n\tcol = int(input(\"Enter column: \")) - 1", "def _to_maze_coord(self, x, y):\n maze = self._get_maze()\n x = int(x / _MAZE_CELL_SIZE)\n y = int(y / _MAZE_CELL_SIZE)\n y = maze.shape[1] - y - 1\n return x, y", "def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value", "def mark(board, player, row, col):\n pass", "def place_at(self, row, col, piece):\n self.board[row + PADDING][col + PADDING] = piece", "def 
Haut():\r\n X1, Y1, X2, Y2 = canvas.coords(boule)\r\n canvas.coords(boule,X1,Y1-20,X2,Y2-20)", "def __init__(self, x, y):\n self.x = x\n self.y = y\n self.x1 = self.x + 30 # largeur et hauteur fixees\n self.y1 = self.y + 30", "def set_xy(self, x, y, val):\r\n\t\tself.grid[y, x] = val", "def cie_xyz(self):\n return tuple(matrix_mult(\n ((0.4124564, 0.3575761, 0.1804375),\n (0.2126729, 0.7151522, 0.0721750),\n (0.0193339, 0.1191920, 0.9503041),\n ),\n (from_srgb(self.red),\n from_srgb(self.green),\n from_srgb(self.blue)\n )\n ))", "def col2vex(self, sele=None, lelem=None, laty=None):\n if sele is None:\n ncol = self.ncolors\n col = self.colors.astype(np.int)\n nbonds = self.nbonds\n ba = self.blist[:,0]\n bb = self.blist[:,1]\n else:\n try:\n indexcol = np.where(sum([self.colors == i for i in sele]))[0] ###since bool arrays, here \"sum\" means \"or\"\n except ValueError:\n indexcol = []\n col = self.colors[indexcol]\n ncol = len(sele)\n nbonds = len(col)\n ba = self.blist[:,0][indexcol]\n bb = self.blist[:,1][indexcol]\n xyz_a = self.xyz[ba]\n xyz_c = []\n for i in range(nbonds):\n bci = self.conn[ba[i]].index(bb[i])\n xyz_ic = self.get_neighb_coords(ba[i], bci)\n xyz_c.append(xyz_ic)\n xyz_c = np.array(xyz_c) + xyz_a\n xyz_c *= .5\n m = mol.fromArray(xyz_c)\n ### DEFAULT ASSIGNMENT\n if lelem is None and laty is None:\n laty = range(ncol)\n lowercase = list('kbabcdefghijklmnopqrstuvwxyz')\n lelem = [lowercase[i] for i in laty]\n laty = [str(i) for i in laty]\n elif lelem is None or laty is None:\n raise TypeError(\"lelem and laty must be both either None or ndarrays\")\n elif len(lelem) != ncol or len(laty) != ncol:\n raise ValueError(\"len of sele, lelem and laty must be the same!\\nsele:\\t%s\\nlelem:\\t%s\\nlaty:\\t%s\" % (sele, lelem, laty))\n lelem = np.array(lelem)\n laty = np.array(laty)\n m.set_elems(lelem[col])\n m.set_atypes(laty[col])\n if hasattr(self,'cell'): m.set_cell(self.cell)\n if hasattr(self,'supercell'): m.supercell = self.supercell[:]\n m.colors = self.colors\n return m", "def __init__(self, grid, x, y, cols):\n self.grid = grid\n self.x = x\n self.y = y\n self.cols = cols", "def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #assign each of neighbours corrds\n #top left to top rigt\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # left to right (center)\n left = (row, col - 1)\n # the (row, col) cordinates passed into this function are situated here\n right = (row, col + 1)\n \n #bottom-left to bottom-right\n bottom_left = (row +1, col -1)\n bottom_center = (row +1, col)\n bottom_right = (row +1, col +1)\n \n return [top_left, top_center, top_right,\n left , right ,\n bottom_left, bottom_center, bottom_right]", "def cancoords(self, schx, schy):\n # Coordinates of scheme (0,0) corner as canvas coords\n x0 = (self._cw - self.width)/2\n y0 = (self._ch - self.height)/2 + self.height\n return (schx + x0, y0 - schy)" ]
[ "0.68489397", "0.67793953", "0.6498352", "0.63620645", "0.6283588", "0.62691915", "0.6266249", "0.62381095", "0.61900526", "0.6178052", "0.6152861", "0.6122054", "0.60530484", "0.6049112", "0.60243577", "0.59963053", "0.59574944", "0.5950011", "0.59291357", "0.59247184", "0.5914957", "0.5896849", "0.58949035", "0.58637714", "0.5840288", "0.5827327", "0.58185786", "0.58174324", "0.5812368", "0.5806068", "0.5796294", "0.579251", "0.5774421", "0.57538503", "0.57185054", "0.571724", "0.5679961", "0.56725025", "0.56600493", "0.5649528", "0.56480396", "0.5642581", "0.563824", "0.5636128", "0.56336004", "0.5633588", "0.5624049", "0.5621011", "0.5618241", "0.561225", "0.5603748", "0.55994093", "0.5596659", "0.55941355", "0.5590205", "0.55883616", "0.55831695", "0.5574649", "0.556294", "0.55606025", "0.5556829", "0.55556166", "0.55546534", "0.555066", "0.5549394", "0.55475426", "0.55434424", "0.5531558", "0.55284536", "0.5527784", "0.55275196", "0.55268997", "0.55246115", "0.5524097", "0.54964954", "0.54930806", "0.5492445", "0.54832745", "0.54778814", "0.54757875", "0.54666084", "0.54636425", "0.5456116", "0.54537106", "0.5451957", "0.54496753", "0.5440616", "0.54366595", "0.5434764", "0.5431005", "0.5421391", "0.5416332", "0.5415555", "0.5414096", "0.54135466", "0.54133296", "0.541064", "0.5408142", "0.54063296", "0.54051304" ]
0.60508746
13